1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
13 #include <linux/rtnetlink.h>
14 #include <linux/sockios.h>
15 #include <linux/ethtool.h>
18 #include <rte_malloc.h>
19 #include <rte_ethdev_driver.h>
20 #include <rte_ethdev_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_common.h>
24 #include <rte_kvargs.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_string_fns.h>
28 #include <rte_alarm.h>
29 #include <rte_eal_paging.h>
31 #include <mlx5_glue.h>
32 #include <mlx5_devx_cmds.h>
33 #include <mlx5_common.h>
34 #include <mlx5_common_mp.h>
35 #include <mlx5_common_mr.h>
36 #include <mlx5_malloc.h>
38 #include "mlx5_defs.h"
40 #include "mlx5_common_os.h"
41 #include "mlx5_utils.h"
42 #include "mlx5_rxtx.h"
43 #include "mlx5_autoconf.h"
45 #include "mlx5_flow.h"
46 #include "rte_pmd_mlx5.h"
47 #include "mlx5_verbs.h"
49 #include "mlx5_devx.h"
51 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
53 #ifndef HAVE_IBV_MLX5_MOD_MPW
54 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
55 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
58 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
59 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
62 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
64 /* Spinlock for mlx5_shared_data allocation. */
65 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
67 /* Process local data for secondary processes. */
68 static struct mlx5_local_data mlx5_local_data;
71 * Set the completion channel file descriptor interrupt as non-blocking.
74 * Pointer to RQ channel object, which includes the channel fd
77  * The file descriptor (representing the interrupt) used in this channel.
80 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
83 mlx5_os_set_nonblock_channel_fd(int fd)
87 flags = fcntl(fd, F_GETFL);
88 return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
92  * Get mlx5 device attributes. The glue function query_device_ex() is called
93  * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5
94  * device attributes are then filled in from that out parameter.
97 * Pointer to ibv context.
100 * Pointer to mlx5 device attributes.
103  * 0 on success, a non-zero error number otherwise.
106 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
109 struct ibv_device_attr_ex attr_ex;
110 memset(device_attr, 0, sizeof(*device_attr));
111 err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
115 device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
116 device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
117 device_attr->max_sge = attr_ex.orig_attr.max_sge;
118 device_attr->max_cq = attr_ex.orig_attr.max_cq;
119 device_attr->max_qp = attr_ex.orig_attr.max_qp;
120 device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
121 device_attr->max_rwq_indirection_table_size =
122 attr_ex.rss_caps.max_rwq_indirection_table_size;
123 device_attr->max_tso = attr_ex.tso_caps.max_tso;
124 device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
126 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
127 err = mlx5_glue->dv_query_device(ctx, &dv_attr);
131 device_attr->flags = dv_attr.flags;
132 device_attr->comp_mask = dv_attr.comp_mask;
133 #ifdef HAVE_IBV_MLX5_MOD_SWP
134 device_attr->sw_parsing_offloads =
135 dv_attr.sw_parsing_caps.sw_parsing_offloads;
137 device_attr->min_single_stride_log_num_of_bytes =
138 dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
139 device_attr->max_single_stride_log_num_of_bytes =
140 dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
141 device_attr->min_single_wqe_log_num_of_strides =
142 dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
143 device_attr->max_single_wqe_log_num_of_strides =
144 dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
145 device_attr->stride_supported_qpts =
146 dv_attr.striding_rq_caps.supported_qpts;
147 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
148 device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
155  * Verbs callback to allocate memory. This function should allocate a buffer
156  * of the size provided, residing inside a huge page.
157  * Please note that all allocations must respect the alignment required by
158  * libmlx5 (i.e. currently rte_mem_page_size()).
161 * The size in bytes of the memory to allocate.
163 * A pointer to the callback data.
166 * Allocated buffer, NULL otherwise and rte_errno is set.
169 mlx5_alloc_verbs_buf(size_t size, void *data)
171 struct mlx5_dev_ctx_shared *sh = data;
173 size_t alignment = rte_mem_page_size();
174 if (alignment == (size_t)-1) {
175 DRV_LOG(ERR, "Failed to get mem page size");
180 MLX5_ASSERT(data != NULL);
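	/*
	 * Allocate through the PMD memory subsystem on the device NUMA node,
	 * with the page-size alignment required by libmlx5.
	 */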
181 ret = mlx5_malloc(0, size, alignment, sh->numa_node);
188  * Verbs callback to free memory.
191 * A pointer to the memory to free.
193 * A pointer to the callback data.
196 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
198 MLX5_ASSERT(data != NULL);
203  * Initialize DR related data within the private structure.
204  * The routine checks the reference counter and performs the actual
205  * resource creation/initialization only if the counter is zero.
208 * Pointer to the private device data structure.
211 * Zero on success, positive error code otherwise.
214 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
216 struct mlx5_dev_ctx_shared *sh = priv->sh;
217 char s[MLX5_HLIST_NAMESIZE] __rte_unused;
220 MLX5_ASSERT(sh && sh->refcnt);
223 err = mlx5_alloc_table_hash_list(priv);
226 /* The resources below are only valid with DV support. */
227 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
228 /* Init port id action cache list. */
229 snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
230 mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
231 flow_dv_port_id_create_cb,
232 flow_dv_port_id_match_cb,
233 flow_dv_port_id_remove_cb);
234 /* Init push vlan action cache list. */
235 snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
236 mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
237 flow_dv_push_vlan_create_cb,
238 flow_dv_push_vlan_match_cb,
239 flow_dv_push_vlan_remove_cb);
240 /* Init sample action cache list. */
241 snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
242 mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
243 flow_dv_sample_create_cb,
244 flow_dv_sample_match_cb,
245 flow_dv_sample_remove_cb);
246 /* Init dest array action cache list. */
247 snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
248 mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
249 flow_dv_dest_array_create_cb,
250 flow_dv_dest_array_match_cb,
251 flow_dv_dest_array_remove_cb);
252 /* Create tags hash list table. */
253 snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
254 sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
255 MLX5_HLIST_WRITE_MOST,
256 flow_dv_tag_create_cb,
257 flow_dv_tag_match_cb,
258 flow_dv_tag_remove_cb);
259 if (!sh->tag_table) {
260 		DRV_LOG(ERR, "tags hash list creation failed.");
264 sh->tag_table->ctx = sh;
265 snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
266 sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
267 0, MLX5_HLIST_WRITE_MOST |
268 MLX5_HLIST_DIRECT_KEY,
269 flow_dv_modify_create_cb,
270 flow_dv_modify_match_cb,
271 flow_dv_modify_remove_cb);
272 if (!sh->modify_cmds) {
273 DRV_LOG(ERR, "hdr modify hash creation failed");
277 sh->modify_cmds->ctx = sh;
278 snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
279 sh->encaps_decaps = mlx5_hlist_create(s,
280 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
281 0, MLX5_HLIST_DIRECT_KEY |
282 MLX5_HLIST_WRITE_MOST,
283 flow_dv_encap_decap_create_cb,
284 flow_dv_encap_decap_match_cb,
285 flow_dv_encap_decap_remove_cb);
286 if (!sh->encaps_decaps) {
287 DRV_LOG(ERR, "encap decap hash creation failed");
291 sh->encaps_decaps->ctx = sh;
293 #ifdef HAVE_MLX5DV_DR
296 /* Reference counter is zero, we should initialize structures. */
297 domain = mlx5_glue->dr_create_domain(sh->ctx,
298 MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
300 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
304 sh->rx_domain = domain;
305 domain = mlx5_glue->dr_create_domain(sh->ctx,
306 MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
308 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
312 sh->tx_domain = domain;
313 #ifdef HAVE_MLX5DV_DR_ESWITCH
314 if (priv->config.dv_esw_en) {
315 domain = mlx5_glue->dr_create_domain
316 (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
318 DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
322 sh->fdb_domain = domain;
323 sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
327 err = mlx5_alloc_tunnel_hub(sh);
329 DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
332 if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
333 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
334 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
336 mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
338 sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
339 #endif /* HAVE_MLX5DV_DR */
340 sh->default_miss_action =
341 mlx5_glue->dr_create_flow_action_default_miss();
342 if (!sh->default_miss_action)
343 DRV_LOG(WARNING, "Default miss action is not supported.");
346 /* Rollback the created objects. */
348 mlx5_glue->dr_destroy_domain(sh->rx_domain);
349 sh->rx_domain = NULL;
352 mlx5_glue->dr_destroy_domain(sh->tx_domain);
353 sh->tx_domain = NULL;
355 if (sh->fdb_domain) {
356 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
357 sh->fdb_domain = NULL;
359 if (sh->esw_drop_action) {
360 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
361 sh->esw_drop_action = NULL;
363 if (sh->pop_vlan_action) {
364 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
365 sh->pop_vlan_action = NULL;
367 if (sh->encaps_decaps) {
368 mlx5_hlist_destroy(sh->encaps_decaps);
369 sh->encaps_decaps = NULL;
371 if (sh->modify_cmds) {
372 mlx5_hlist_destroy(sh->modify_cmds);
373 sh->modify_cmds = NULL;
376 	/* Tags should have been destroyed together with the flows before this point. */
377 mlx5_hlist_destroy(sh->tag_table);
378 sh->tag_table = NULL;
380 if (sh->tunnel_hub) {
381 mlx5_release_tunnel_hub(sh, priv->dev_port);
382 sh->tunnel_hub = NULL;
384 mlx5_free_table_hash_list(priv);
389 * Destroy DR related data within private structure.
392 * Pointer to the private device data structure.
395 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
397 struct mlx5_dev_ctx_shared *sh = priv->sh;
399 MLX5_ASSERT(sh && sh->refcnt);
402 #ifdef HAVE_MLX5DV_DR
404 mlx5_glue->dr_destroy_domain(sh->rx_domain);
405 sh->rx_domain = NULL;
408 mlx5_glue->dr_destroy_domain(sh->tx_domain);
409 sh->tx_domain = NULL;
411 #ifdef HAVE_MLX5DV_DR_ESWITCH
412 if (sh->fdb_domain) {
413 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
414 sh->fdb_domain = NULL;
416 if (sh->esw_drop_action) {
417 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
418 sh->esw_drop_action = NULL;
421 if (sh->pop_vlan_action) {
422 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
423 sh->pop_vlan_action = NULL;
425 #endif /* HAVE_MLX5DV_DR */
426 if (sh->default_miss_action)
427 mlx5_glue->destroy_flow_action
428 (sh->default_miss_action);
429 if (sh->encaps_decaps) {
430 mlx5_hlist_destroy(sh->encaps_decaps);
431 sh->encaps_decaps = NULL;
433 if (sh->modify_cmds) {
434 mlx5_hlist_destroy(sh->modify_cmds);
435 sh->modify_cmds = NULL;
438 	/* Tags should have been destroyed together with the flows before this point. */
439 mlx5_hlist_destroy(sh->tag_table);
440 sh->tag_table = NULL;
442 if (sh->tunnel_hub) {
443 mlx5_release_tunnel_hub(sh, priv->dev_port);
444 sh->tunnel_hub = NULL;
446 mlx5_cache_list_destroy(&sh->port_id_action_list);
447 mlx5_cache_list_destroy(&sh->push_vlan_action_list);
448 mlx5_free_table_hash_list(priv);
452  * Initialize shared data between the primary and secondary processes.
454  * A memzone is reserved by the primary process and secondary processes attach to
458 * 0 on success, a negative errno value otherwise and rte_errno is set.
461 mlx5_init_shared_data(void)
463 const struct rte_memzone *mz;
466 rte_spinlock_lock(&mlx5_shared_data_lock);
467 if (mlx5_shared_data == NULL) {
468 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
469 /* Allocate shared memory. */
470 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
471 sizeof(*mlx5_shared_data),
475 "Cannot allocate mlx5 shared data");
479 mlx5_shared_data = mz->addr;
480 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
481 rte_spinlock_init(&mlx5_shared_data->lock);
483 /* Lookup allocated shared memory. */
484 mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
487 "Cannot attach mlx5 shared data");
491 mlx5_shared_data = mz->addr;
492 memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
496 rte_spinlock_unlock(&mlx5_shared_data_lock);
501 * PMD global initialization.
503  * Independent of any individual device, this function initializes global
504  * per-PMD data structures, distinguishing primary and secondary processes.
505  * Hence, each initialization is called once per process.
508 * 0 on success, a negative errno value otherwise and rte_errno is set.
513 struct mlx5_shared_data *sd;
514 struct mlx5_local_data *ld = &mlx5_local_data;
517 if (mlx5_init_shared_data())
519 sd = mlx5_shared_data;
521 rte_spinlock_lock(&sd->lock);
522 switch (rte_eal_process_type()) {
523 case RTE_PROC_PRIMARY:
526 LIST_INIT(&sd->mem_event_cb_list);
527 rte_rwlock_init(&sd->mem_event_rwlock);
528 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
529 mlx5_mr_mem_event_cb, NULL);
530 ret = mlx5_mp_init_primary(MLX5_MP_NAME,
531 mlx5_mp_os_primary_handle);
534 sd->init_done = true;
536 case RTE_PROC_SECONDARY:
539 ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
540 mlx5_mp_os_secondary_handle);
544 ld->init_done = true;
550 rte_spinlock_unlock(&sd->lock);
555 * Create the Tx queue DevX/Verbs object.
558 * Pointer to Ethernet device.
560 * Queue index in DPDK Tx queue array.
563 * 0 on success, a negative errno value otherwise and rte_errno is set.
566 mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
568 struct mlx5_priv *priv = dev->data->dev_private;
569 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
570 struct mlx5_txq_ctrl *txq_ctrl =
571 container_of(txq_data, struct mlx5_txq_ctrl, txq);
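	/*
	 * Queue object selection: hairpin queues are always created through
	 * DevX; when the DevX UAR offset is available and E-Switch is not
	 * enabled, regular queues also use DevX, otherwise fall back to the
	 * Verbs implementation.
	 */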
573 if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
574 return mlx5_txq_devx_obj_new(dev, idx);
575 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
576 if (!priv->config.dv_esw_en)
577 return mlx5_txq_devx_obj_new(dev, idx);
579 return mlx5_txq_ibv_obj_new(dev, idx);
583  * Release a Tx DevX/Verbs queue object.
586 * DevX/Verbs Tx queue object.
589 mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
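	/* Release with the same backend that was selected at creation time. */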
591 if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
592 mlx5_txq_devx_obj_release(txq_obj);
595 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
596 if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
597 mlx5_txq_devx_obj_release(txq_obj);
601 mlx5_txq_ibv_obj_release(txq_obj);
605  * Detect and configure the DV flow counter mode.
608 * Pointer to rte_eth_dev structure.
612 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
614 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
615 struct mlx5_priv *priv = dev->data->dev_private;
616 struct mlx5_dev_ctx_shared *sh = priv->sh;
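	/*
	 * The fallback counter management is selected when DevX async
	 * queries, DevX itself, DV flow, counter dump, 4-counter bulk
	 * allocation or counter offset discovery support is missing.
	 */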
619 #ifndef HAVE_IBV_DEVX_ASYNC
623 if (!priv->config.devx || !priv->config.dv_flow_en ||
624 !priv->config.hca_attr.flow_counters_dump ||
625 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
626 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
630 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
631 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
632 priv->config.hca_attr.flow_counters_dump,
633 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
634 	/* Initialize fallback mode only on the port that initializes the sh. */
636 sh->cmng.counter_fallback = fallback;
637 else if (fallback != sh->cmng.counter_fallback)
638 DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
639 			"than others: %d.", PORT_ID(priv), fallback);
644 * Spawn an Ethernet device from Verbs information.
647 * Backing DPDK device.
649 * Verbs device parameters (name, port, switch_info) to spawn.
651 * Device configuration parameters.
654 * A valid Ethernet device object on success, NULL otherwise and rte_errno
655 * is set. The following errors are defined:
657 * EBUSY: device is not supposed to be spawned.
658  * EEXIST: device is already spawned.
660 static struct rte_eth_dev *
661 mlx5_dev_spawn(struct rte_device *dpdk_dev,
662 struct mlx5_dev_spawn_data *spawn,
663 struct mlx5_dev_config *config)
665 const struct mlx5_switch_info *switch_info = &spawn->info;
666 struct mlx5_dev_ctx_shared *sh = NULL;
667 struct ibv_port_attr port_attr;
668 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
669 struct rte_eth_dev *eth_dev = NULL;
670 struct mlx5_priv *priv = NULL;
672 unsigned int hw_padding = 0;
674 unsigned int cqe_comp;
675 unsigned int cqe_pad = 0;
676 unsigned int tunnel_en = 0;
677 unsigned int mpls_en = 0;
678 unsigned int swp = 0;
679 unsigned int mprq = 0;
680 unsigned int mprq_min_stride_size_n = 0;
681 unsigned int mprq_max_stride_size_n = 0;
682 unsigned int mprq_min_stride_num_n = 0;
683 unsigned int mprq_max_stride_num_n = 0;
684 struct rte_ether_addr mac;
685 char name[RTE_ETH_NAME_MAX_LEN];
686 int own_domain_id = 0;
689 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
690 struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
693 /* Determine if this port representor is supposed to be spawned. */
694 if (switch_info->representor && dpdk_dev->devargs) {
695 struct rte_eth_devargs eth_da;
697 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da);
700 DRV_LOG(ERR, "failed to process device arguments: %s",
701 strerror(rte_errno));
704 for (i = 0; i < eth_da.nb_representor_ports; ++i)
705 if (eth_da.representor_ports[i] ==
706 (uint16_t)switch_info->port_name)
708 if (i == eth_da.nb_representor_ports) {
713 /* Build device name. */
714 if (spawn->pf_bond < 0) {
716 if (!switch_info->representor)
717 strlcpy(name, dpdk_dev->name, sizeof(name));
719 snprintf(name, sizeof(name), "%s_representor_%u",
720 dpdk_dev->name, switch_info->port_name);
722 /* Bonding device. */
723 if (!switch_info->representor)
724 snprintf(name, sizeof(name), "%s_%s",
726 mlx5_os_get_dev_device_name(spawn->phys_dev));
728 snprintf(name, sizeof(name), "%s_%s_representor_%u",
730 mlx5_os_get_dev_device_name(spawn->phys_dev),
731 switch_info->port_name);
733 /* check if the device is already spawned */
734 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
738 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
739 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
740 struct mlx5_mp_id mp_id;
742 eth_dev = rte_eth_dev_attach_secondary(name);
743 if (eth_dev == NULL) {
744 DRV_LOG(ERR, "can not attach rte ethdev");
748 priv = eth_dev->data->dev_private;
749 if (priv->sh->bond_dev != UINT16_MAX)
750 /* For bonding port, use primary PCI device. */
752 rte_eth_devices[priv->sh->bond_dev].device;
754 eth_dev->device = dpdk_dev;
755 eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
756 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
757 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
758 err = mlx5_proc_priv_init(eth_dev);
761 mp_id.port_id = eth_dev->data->port_id;
762 strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
763 /* Receive command fd from primary process */
764 err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
767 /* Remap UAR for Tx queues. */
768 err = mlx5_tx_uar_init_secondary(eth_dev, err);
772 * Ethdev pointer is still required as input since
773 * the primary device is not accessible from the
776 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
777 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
780 mlx5_dev_close(eth_dev);
784 	 * Some parameters ("tx_db_nc" in particular) are needed in
785 	 * advance to create the dv/verbs device context. We process the
786 	 * devargs here to get them, and later process the devargs again
787 	 * to override some hardware settings.
789 err = mlx5_args(config, dpdk_dev->devargs);
792 DRV_LOG(ERR, "failed to process device arguments: %s",
793 strerror(rte_errno));
796 if (config->dv_miss_info) {
797 if (switch_info->master || switch_info->representor)
798 config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
800 mlx5_malloc_mem_select(config->sys_mem_en);
801 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
804 config->devx = sh->devx;
805 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
806 config->dest_tir = 1;
808 #ifdef HAVE_IBV_MLX5_MOD_SWP
809 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
812 * Multi-packet send is supported by ConnectX-4 Lx PF as well
813 * as all ConnectX-5 devices.
815 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
816 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
818 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
819 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
821 mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
822 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
823 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
824 DRV_LOG(DEBUG, "enhanced MPW is supported");
825 mps = MLX5_MPW_ENHANCED;
827 DRV_LOG(DEBUG, "MPW is supported");
831 DRV_LOG(DEBUG, "MPW isn't supported");
832 mps = MLX5_MPW_DISABLED;
834 #ifdef HAVE_IBV_MLX5_MOD_SWP
835 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
836 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
837 DRV_LOG(DEBUG, "SWP support: %u", swp);
840 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
841 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
842 struct mlx5dv_striding_rq_caps mprq_caps =
843 dv_attr.striding_rq_caps;
845 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
846 mprq_caps.min_single_stride_log_num_of_bytes);
847 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
848 mprq_caps.max_single_stride_log_num_of_bytes);
849 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
850 mprq_caps.min_single_wqe_log_num_of_strides);
851 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
852 mprq_caps.max_single_wqe_log_num_of_strides);
853 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
854 mprq_caps.supported_qpts);
855 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
857 mprq_min_stride_size_n =
858 mprq_caps.min_single_stride_log_num_of_bytes;
859 mprq_max_stride_size_n =
860 mprq_caps.max_single_stride_log_num_of_bytes;
861 mprq_min_stride_num_n =
862 mprq_caps.min_single_wqe_log_num_of_strides;
863 mprq_max_stride_num_n =
864 mprq_caps.max_single_wqe_log_num_of_strides;
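	/*
	 * On 128B cache line systems, Rx CQE compression can only be used
	 * when the device supports the 128B compressed CQE format.
	 */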
867 if (RTE_CACHE_LINE_SIZE == 128 &&
868 !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
872 config->cqe_comp = cqe_comp;
873 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
874 /* Whether device supports 128B Rx CQE padding. */
875 cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
876 (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
878 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
879 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
880 tunnel_en = ((dv_attr.tunnel_offloads_caps &
881 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
882 (dv_attr.tunnel_offloads_caps &
883 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
884 (dv_attr.tunnel_offloads_caps &
885 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
887 DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
888 tunnel_en ? "" : "not ");
891 "tunnel offloading disabled due to old OFED/rdma-core version");
893 config->tunnel_en = tunnel_en;
894 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
895 mpls_en = ((dv_attr.tunnel_offloads_caps &
896 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
897 (dv_attr.tunnel_offloads_caps &
898 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
899 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
900 mpls_en ? "" : "not ");
902 DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
903 " old OFED/rdma-core version or firmware configuration");
905 config->mpls_en = mpls_en;
906 /* Check port status. */
907 err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
909 DRV_LOG(ERR, "port query failed: %s", strerror(err));
912 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
913 DRV_LOG(ERR, "port is not configured in Ethernet mode");
917 if (port_attr.state != IBV_PORT_ACTIVE)
918 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
919 mlx5_glue->port_state_str(port_attr.state),
921 /* Allocate private eth device data. */
922 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
924 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
926 DRV_LOG(ERR, "priv allocation failure");
931 priv->dev_port = spawn->phys_port;
932 priv->pci_dev = spawn->pci_dev;
933 priv->mtu = RTE_ETHER_MTU;
934 priv->mp_id.port_id = port_id;
935 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
936 /* Some internal functions rely on Netlink sockets, open them now. */
937 priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
938 priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
939 priv->representor = !!switch_info->representor;
940 priv->master = !!switch_info->master;
941 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
942 priv->vport_meta_tag = 0;
943 priv->vport_meta_mask = 0;
944 priv->pf_bond = spawn->pf_bond;
945 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
947 * The DevX port query API is implemented. E-Switch may use
948 * either vport or reg_c[0] metadata register to match on
949 * vport index. The engaged part of metadata register is
952 if (switch_info->representor || switch_info->master) {
953 devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
954 MLX5DV_DEVX_PORT_MATCH_REG_C_0;
955 err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
959 "can't query devx port %d on device %s",
961 mlx5_os_get_dev_device_name(spawn->phys_dev));
962 devx_port.comp_mask = 0;
965 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
966 priv->vport_meta_tag = devx_port.reg_c_0.value;
967 priv->vport_meta_mask = devx_port.reg_c_0.mask;
968 if (!priv->vport_meta_mask) {
969 DRV_LOG(ERR, "vport zero mask for port %d"
970 " on bonding device %s",
972 mlx5_os_get_dev_device_name
977 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
978 DRV_LOG(ERR, "invalid vport tag for port %d"
979 " on bonding device %s",
981 mlx5_os_get_dev_device_name
987 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
988 priv->vport_id = devx_port.vport_num;
989 } else if (spawn->pf_bond >= 0) {
990 DRV_LOG(ERR, "can't deduce vport index for port %d"
991 " on bonding device %s",
993 mlx5_os_get_dev_device_name(spawn->phys_dev));
997 		/* Assume the vport index in a compatible (legacy) way. */
998 priv->vport_id = switch_info->representor ?
999 switch_info->port_name + 1 : -1;
1003 	 * Kernel/rdma_core support single E-Switch per PF configurations
1004 	 * only, and the vport_id field contains the vport index for the
1005 	 * associated VF, which is deduced from the representor port name.
1006 	 * For example, suppose the IB device port 10 has an attached
1007 	 * network device eth0 whose port name attribute is pf0vf2; we can
1008 	 * deduce the VF number as 2 and set the vport index as 3 (2+1).
1009 	 * This assignment scheme should be changed if configurations with
1010 	 * multiple E-Switch instances per PF and/or PCI subfunctions are
1011 	 * added.
1013 priv->vport_id = switch_info->representor ?
1014 switch_info->port_name + 1 : -1;
1016 /* representor_id field keeps the unmodified VF index. */
1017 priv->representor_id = switch_info->representor ?
1018 switch_info->port_name : -1;
1020 * Look for sibling devices in order to reuse their switch domain
1021 * if any, otherwise allocate one.
1023 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
1024 const struct mlx5_priv *opriv =
1025 rte_eth_devices[port_id].data->dev_private;
1028 opriv->sh != priv->sh ||
1030 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1032 priv->domain_id = opriv->domain_id;
1035 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1036 err = rte_eth_switch_domain_alloc(&priv->domain_id);
1039 DRV_LOG(ERR, "unable to allocate switch domain: %s",
1040 strerror(rte_errno));
1045 /* Override some values set by hardware configuration. */
1046 mlx5_args(config, dpdk_dev->devargs);
1047 err = mlx5_dev_check_sibling_config(priv, config);
1050 config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
1051 IBV_DEVICE_RAW_IP_CSUM);
1052 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
1053 (config->hw_csum ? "" : "not "));
1054 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
1055 !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1056 DRV_LOG(DEBUG, "counters are not supported");
1058 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
1059 if (config->dv_flow_en) {
1060 DRV_LOG(WARNING, "DV flow is not supported");
1061 config->dv_flow_en = 0;
1064 config->ind_table_max_size =
1065 sh->device_attr.max_rwq_indirection_table_size;
1067 * Remove this check once DPDK supports larger/variable
1068 * indirection tables.
1070 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
1071 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
1072 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
1073 config->ind_table_max_size);
1074 config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
1075 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
1076 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
1077 (config->hw_vlan_strip ? "" : "not "));
1078 config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
1079 IBV_RAW_PACKET_CAP_SCATTER_FCS);
1080 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1081 hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
1082 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1083 hw_padding = !!(sh->device_attr.device_cap_flags_ex &
1084 IBV_DEVICE_PCI_WRITE_END_PADDING);
1086 if (config->hw_padding && !hw_padding) {
1087 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
1088 config->hw_padding = 0;
1089 } else if (config->hw_padding) {
1090 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
1092 config->tso = (sh->device_attr.max_tso > 0 &&
1093 (sh->device_attr.tso_supported_qpts &
1094 (1 << IBV_QPT_RAW_PACKET)));
1096 config->tso_max_payload_sz = sh->device_attr.max_tso;
1098 * MPW is disabled by default, while the Enhanced MPW is enabled
1101 if (config->mps == MLX5_ARG_UNSET)
1102 config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
1105 config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
1106 DRV_LOG(INFO, "%sMPS is %s",
1107 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
1108 config->mps == MLX5_MPW ? "legacy " : "",
1109 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
1110 if (config->cqe_comp && !cqe_comp) {
1111 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
1112 config->cqe_comp = 0;
1114 if (config->cqe_pad && !cqe_pad) {
1115 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
1116 config->cqe_pad = 0;
1117 } else if (config->cqe_pad) {
1118 DRV_LOG(INFO, "Rx CQE padding is enabled");
1121 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
1126 	/* Check relaxed ordering support. */
1127 if (!haswell_broadwell_cpu) {
1128 sh->cmng.relaxed_ordering_write =
1129 config->hca_attr.relaxed_ordering_write;
1130 sh->cmng.relaxed_ordering_read =
1131 config->hca_attr.relaxed_ordering_read;
1133 sh->cmng.relaxed_ordering_read = 0;
1134 sh->cmng.relaxed_ordering_write = 0;
1136 /* Check for LRO support. */
1137 if (config->dest_tir && config->hca_attr.lro_cap &&
1138 config->dv_flow_en) {
1139 /* TBD check tunnel lro caps. */
1140 config->lro.supported = config->hca_attr.lro_cap;
1141 DRV_LOG(DEBUG, "Device supports LRO");
1143 * If LRO timeout is not configured by application,
1144 * use the minimal supported value.
1146 if (!config->lro.timeout)
1147 config->lro.timeout =
1148 config->hca_attr.lro_timer_supported_periods[0];
1149 DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1150 config->lro.timeout);
1151 		DRV_LOG(DEBUG, "Minimal size of TCP segment"
1152 			" required for LRO coalescing is %d bytes",
1153 config->hca_attr.lro_min_mss_size);
1155 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
1156 if (config->hca_attr.qos.sup &&
1157 config->hca_attr.qos.srtcm_sup &&
1158 config->dv_flow_en) {
1159 uint8_t reg_c_mask =
1160 config->hca_attr.qos.flow_meter_reg_c_ids;
1162 * Meter needs two REG_C's for color match and pre-sfx
1163 * flow match. Here get the REG_C for color match.
1164 		 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
1167 if (__builtin_popcount(reg_c_mask) < 1) {
1169 DRV_LOG(WARNING, "No available register for"
1173 * The meter color register is used by the
1174 * flow-hit feature as well.
1175 			 * The flow-hit feature must use REG_C_3.
1176 * Prefer REG_C_3 if it is available.
1178 if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
1179 priv->mtr_color_reg = REG_C_3;
1181 priv->mtr_color_reg = ffs(reg_c_mask)
1184 priv->mtr_reg_share =
1185 config->hca_attr.qos.flow_meter_reg_share;
1186 		DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
1187 priv->mtr_color_reg);
1191 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
1192 if (config->hca_attr.flow_hit_aso &&
1193 priv->mtr_color_reg == REG_C_3) {
1194 sh->flow_hit_aso_en = 1;
1195 err = mlx5_flow_aso_age_mng_init(sh);
1200 DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
1202 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1203 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1204 if (config->hca_attr.log_max_ft_sampler_num > 0 &&
1205 config->dv_flow_en) {
1206 priv->sampler_en = 1;
1207 			DRV_LOG(DEBUG, "Sampler is enabled.");
1209 priv->sampler_en = 0;
1210 if (!config->hca_attr.log_max_ft_sampler_num)
1211 DRV_LOG(WARNING, "No available register for"
1214 			DRV_LOG(DEBUG, "DV flow is not supported.");
1218 if (config->tx_pp) {
1219 DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
1220 config->hca_attr.dev_freq_khz);
1221 DRV_LOG(DEBUG, "Packet pacing is %ssupported",
1222 config->hca_attr.qos.packet_pacing ? "" : "not ");
1223 DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
1224 config->hca_attr.cross_channel ? "" : "not ");
1225 DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1226 config->hca_attr.wqe_index_ignore ? "" : "not ");
1227 DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1228 config->hca_attr.non_wire_sq ? "" : "not ");
1229 DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1230 config->hca_attr.log_max_static_sq_wq ? "" : "not ",
1231 config->hca_attr.log_max_static_sq_wq);
1232 DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1233 config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
1234 if (!config->devx) {
1235 DRV_LOG(ERR, "DevX is required for packet pacing");
1239 if (!config->hca_attr.qos.packet_pacing) {
1240 DRV_LOG(ERR, "Packet pacing is not supported");
1244 if (!config->hca_attr.cross_channel) {
1245 DRV_LOG(ERR, "Cross channel operations are"
1246 " required for packet pacing");
1250 if (!config->hca_attr.wqe_index_ignore) {
1251 DRV_LOG(ERR, "WQE index ignore feature is"
1252 " required for packet pacing");
1256 if (!config->hca_attr.non_wire_sq) {
1257 DRV_LOG(ERR, "Non-wire SQ feature is"
1258 " required for packet pacing");
1262 if (!config->hca_attr.log_max_static_sq_wq) {
1263 DRV_LOG(ERR, "Static WQE SQ feature is"
1264 " required for packet pacing");
1268 if (!config->hca_attr.qos.wqe_rate_pp) {
1269 DRV_LOG(ERR, "WQE rate mode is required"
1270 " for packet pacing");
1274 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1275 DRV_LOG(ERR, "DevX does not provide UAR offset,"
1276 " can't create queues for packet pacing");
1282 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
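		/*
		 * Detect real-time timestamp mode: read the MTUTC register
		 * when user register access is available; otherwise assume
		 * real-time mode if the device frequency is 1GHz
		 * (NS_PER_S / MS_PER_S kHz).
		 */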
1284 err = config->hca_attr.access_register_user ?
1285 mlx5_devx_cmd_register_read
1286 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
1287 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
1291 /* MTUTC register is read successfully. */
1292 ts_mode = MLX5_GET(register_mtutc, reg,
1294 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
1295 config->rt_timestamp = 1;
1297 /* Kernel does not support register reading. */
1298 if (config->hca_attr.dev_freq_khz ==
1299 (NS_PER_S / MS_PER_S))
1300 config->rt_timestamp = 1;
1304 	 * If the HW has a bug working with tunnel packet decapsulation and
1305 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
1306 	 * bit. Then the DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1308 if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
1309 config->hw_fcs_strip = 0;
1310 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1311 (config->hw_fcs_strip ? "" : "not "));
1312 if (config->mprq.enabled && mprq) {
1313 if (config->mprq.stride_num_n &&
1314 (config->mprq.stride_num_n > mprq_max_stride_num_n ||
1315 config->mprq.stride_num_n < mprq_min_stride_num_n)) {
1316 config->mprq.stride_num_n =
1317 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1318 mprq_min_stride_num_n),
1319 mprq_max_stride_num_n);
1321 "the number of strides"
1322 " for Multi-Packet RQ is out of range,"
1323 " setting default value (%u)",
1324 1 << config->mprq.stride_num_n);
1326 if (config->mprq.stride_size_n &&
1327 (config->mprq.stride_size_n > mprq_max_stride_size_n ||
1328 config->mprq.stride_size_n < mprq_min_stride_size_n)) {
1329 config->mprq.stride_size_n =
1330 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
1331 mprq_min_stride_size_n),
1332 mprq_max_stride_size_n);
1334 "the size of a stride"
1335 " for Multi-Packet RQ is out of range,"
1336 " setting default value (%u)",
1337 1 << config->mprq.stride_size_n);
1339 config->mprq.min_stride_size_n = mprq_min_stride_size_n;
1340 config->mprq.max_stride_size_n = mprq_max_stride_size_n;
1341 } else if (config->mprq.enabled && !mprq) {
1342 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
1343 config->mprq.enabled = 0;
1345 if (config->max_dump_files_num == 0)
1346 config->max_dump_files_num = 128;
1347 eth_dev = rte_eth_dev_allocate(name);
1348 if (eth_dev == NULL) {
1349 DRV_LOG(ERR, "can not allocate rte ethdev");
1353 if (priv->representor) {
1354 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1355 eth_dev->data->representor_id = priv->representor_id;
1358 	 * Store the associated network device interface index. This index
1359 	 * is permanent throughout the lifetime of the device. So, we may
1360 	 * store the ifindex here and use the cached value later.
1362 MLX5_ASSERT(spawn->ifindex);
1363 priv->if_index = spawn->ifindex;
1364 if (priv->pf_bond >= 0 && priv->master) {
1365 /* Get bond interface info */
1366 err = mlx5_sysfs_bond_info(priv->if_index,
1367 &priv->bond_ifindex,
1370 DRV_LOG(ERR, "unable to get bond info: %s",
1371 strerror(rte_errno));
1373 DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
1374 priv->if_index, priv->bond_ifindex,
1377 eth_dev->data->dev_private = priv;
1378 priv->dev_data = eth_dev->data;
1379 eth_dev->data->mac_addrs = priv->mac;
1380 if (spawn->pf_bond < 0) {
1381 eth_dev->device = dpdk_dev;
1383 /* Use primary bond PCI as device. */
1384 if (sh->bond_dev == UINT16_MAX) {
1385 sh->bond_dev = eth_dev->data->port_id;
1386 eth_dev->device = dpdk_dev;
1388 eth_dev->device = rte_eth_devices[sh->bond_dev].device;
1391 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1392 /* Configure the first MAC address by default. */
1393 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1395 "port %u cannot get MAC address, is mlx5_en"
1396 " loaded? (errno: %s)",
1397 eth_dev->data->port_id, strerror(rte_errno));
1402 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1403 eth_dev->data->port_id,
1404 mac.addr_bytes[0], mac.addr_bytes[1],
1405 mac.addr_bytes[2], mac.addr_bytes[3],
1406 mac.addr_bytes[4], mac.addr_bytes[5]);
1407 #ifdef RTE_LIBRTE_MLX5_DEBUG
1409 char ifname[IF_NAMESIZE];
1411 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1412 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1413 eth_dev->data->port_id, ifname);
1415 DRV_LOG(DEBUG, "port %u ifname is unknown",
1416 eth_dev->data->port_id);
1419 /* Get actual MTU if possible. */
1420 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1425 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1427 /* Initialize burst functions to prevent crashes before link-up. */
1428 eth_dev->rx_pkt_burst = removed_rx_burst;
1429 eth_dev->tx_pkt_burst = removed_tx_burst;
1430 eth_dev->dev_ops = &mlx5_os_dev_ops;
1431 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1432 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1433 eth_dev->rx_queue_count = mlx5_rx_queue_count;
1434 /* Register MAC address. */
1435 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1436 if (config->vf && config->vf_nl_en)
1437 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1438 mlx5_ifindex(eth_dev),
1439 eth_dev->data->mac_addrs,
1440 MLX5_MAX_MAC_ADDRESSES);
1442 priv->ctrl_flows = 0;
1443 rte_spinlock_init(&priv->flow_list_lock);
1444 TAILQ_INIT(&priv->flow_meters);
1445 TAILQ_INIT(&priv->flow_meter_profiles);
1446 /* Hint libmlx5 to use PMD allocator for data plane resources */
1447 mlx5_glue->dv_set_context_attr(sh->ctx,
1448 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1449 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
1450 .alloc = &mlx5_alloc_verbs_buf,
1451 .free = &mlx5_free_verbs_buf,
1454 /* Bring Ethernet device up. */
1455 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1456 eth_dev->data->port_id);
1457 mlx5_set_link_up(eth_dev);
1459 * Even though the interrupt handler is not installed yet,
1460 * interrupts will still trigger on the async_fd from
1461 * Verbs context returned by ibv_open_device().
1463 mlx5_link_update(eth_dev, 0);
1464 #ifdef HAVE_MLX5DV_DR_ESWITCH
1465 if (!(config->hca_attr.eswitch_manager && config->dv_flow_en &&
1466 (switch_info->representor || switch_info->master)))
1467 config->dv_esw_en = 0;
1469 config->dv_esw_en = 0;
1471 /* Detect minimal data bytes to inline. */
1472 mlx5_set_min_inline(spawn, config);
1473 /* Store device configuration on private structure. */
1474 priv->config = *config;
1475 /* Create context for virtual machine VLAN workaround. */
1476 priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1477 if (config->dv_flow_en) {
1478 err = mlx5_alloc_shared_dr(priv);
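	/*
	 * Select the queue object operations: DevX ops are used when DevX,
	 * DV flow and TIR destination are all available (drop actions and,
	 * without DevX UAR offset support, Tx queue modification still go
	 * through Verbs); otherwise plain Verbs ops are used.
	 */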
1482 if (config->devx && config->dv_flow_en && config->dest_tir) {
1483 priv->obj_ops = devx_obj_ops;
1484 priv->obj_ops.drop_action_create =
1485 ibv_obj_ops.drop_action_create;
1486 priv->obj_ops.drop_action_destroy =
1487 ibv_obj_ops.drop_action_destroy;
1488 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1489 priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
1491 if (config->dv_esw_en)
1492 priv->obj_ops.txq_obj_modify =
1493 ibv_obj_ops.txq_obj_modify;
1495 /* Use specific wrappers for Tx object. */
1496 priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
1497 priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
1500 priv->obj_ops = ibv_obj_ops;
1502 priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
1503 if (!priv->drop_queue.hrxq)
1505 	/* Detect the number of Verbs flow priorities supported. */
1506 err = mlx5_flow_discover_priorities(eth_dev);
1511 priv->config.flow_prio = err;
1512 if (!priv->config.dv_esw_en &&
1513 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1514 DRV_LOG(WARNING, "metadata mode %u is not supported "
1515 "(no E-Switch)", priv->config.dv_xmeta_en);
1516 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1518 mlx5_set_metadata_mask(eth_dev);
1519 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1520 !priv->sh->dv_regc0_mask) {
1521 DRV_LOG(ERR, "metadata mode %u is not supported "
1522 "(no metadata reg_c[0] is available)",
1523 priv->config.dv_xmeta_en);
1527 mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
1528 mlx5_hrxq_create_cb,
1530 mlx5_hrxq_remove_cb);
1531 /* Query availability of metadata reg_c's. */
1532 err = mlx5_flow_discover_mreg_c(eth_dev);
1537 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1539 "port %u extensive metadata register is not supported",
1540 eth_dev->data->port_id);
1541 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1542 DRV_LOG(ERR, "metadata mode %u is not supported "
1543 "(no metadata registers available)",
1544 priv->config.dv_xmeta_en);
1549 if (priv->config.dv_flow_en &&
1550 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1551 mlx5_flow_ext_mreg_supported(eth_dev) &&
1552 priv->sh->dv_regc0_mask) {
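		/*
		 * Create the hash table caching the metadata register copy
		 * flows used in extensive metadata mode (presumably the
		 * MARK/metadata copy actions, see flow_dv_mreg_* callbacks).
		 */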
1553 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1554 MLX5_FLOW_MREG_HTABLE_SZ,
1556 flow_dv_mreg_create_cb,
1557 flow_dv_mreg_match_cb,
1558 flow_dv_mreg_remove_cb);
1559 if (!priv->mreg_cp_tbl) {
1563 priv->mreg_cp_tbl->ctx = eth_dev;
1565 rte_spinlock_init(&priv->shared_act_sl);
1566 mlx5_flow_counter_mode_config(eth_dev);
1567 if (priv->config.dv_flow_en)
1568 eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1572 if (priv->mreg_cp_tbl)
1573 mlx5_hlist_destroy(priv->mreg_cp_tbl);
1575 mlx5_os_free_shared_dr(priv);
1576 if (priv->nl_socket_route >= 0)
1577 close(priv->nl_socket_route);
1578 if (priv->nl_socket_rdma >= 0)
1579 close(priv->nl_socket_rdma);
1580 if (priv->vmwa_context)
1581 mlx5_vlan_vmwa_exit(priv->vmwa_context);
1582 if (eth_dev && priv->drop_queue.hrxq)
1583 mlx5_drop_action_destroy(eth_dev);
1585 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1586 mlx5_cache_list_destroy(&priv->hrxqs);
1588 if (eth_dev != NULL)
1589 eth_dev->data->dev_private = NULL;
1591 if (eth_dev != NULL) {
1592 /* mac_addrs must not be freed alone because part of
1595 eth_dev->data->mac_addrs = NULL;
1596 rte_eth_dev_release_port(eth_dev);
1599 mlx5_free_shared_dev_ctx(sh);
1600 MLX5_ASSERT(err > 0);
1606 * Comparison callback to sort device data.
1608 * This is meant to be used with qsort().
1611 * Pointer to pointer to first data object.
1613 * Pointer to pointer to second data object.
1616 * 0 if both objects are equal, less than 0 if the first argument is less
1617 * than the second, greater than 0 otherwise.
1620 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1622 const struct mlx5_switch_info *si_a =
1623 &((const struct mlx5_dev_spawn_data *)a)->info;
1624 const struct mlx5_switch_info *si_b =
1625 &((const struct mlx5_dev_spawn_data *)b)->info;
1628 /* Master device first. */
1629 ret = si_b->master - si_a->master;
1632 /* Then representor devices. */
1633 ret = si_b->representor - si_a->representor;
1636 /* Unidentified devices come last in no specific order. */
1637 if (!si_a->representor)
1639 /* Order representors by name. */
1640 return si_a->port_name - si_b->port_name;
1644 * Match PCI information for possible slaves of bonding device.
1646 * @param[in] ibv_dev
1647 * Pointer to Infiniband device structure.
1648 * @param[in] pci_dev
1649 * Pointer to PCI device structure to match PCI address.
1650 * @param[in] nl_rdma
1651 * Netlink RDMA group socket handle.
1654  *   negative value if no bonding device is found, otherwise
1655  *   positive index of the slave PF in bonding.
1658 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1659 const struct rte_pci_device *pci_dev,
1662 char ifname[IF_NAMESIZE + 1];
1663 unsigned int ifindex;
1669 * Try to get master device name. If something goes
1670 * wrong suppose the lack of kernel support and no
1675 if (!strstr(ibv_dev->name, "bond"))
1677 np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
1681 * The Master device might not be on the predefined
1682 	 * port (not on port index 1, it is not guaranteed),
1683 	 * we have to scan all Infiniband device ports and
1686 for (i = 1; i <= np; ++i) {
1687 /* Check whether Infiniband port is populated. */
1688 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
1691 if (!if_indextoname(ifindex, ifname))
1693 /* Try to read bonding slave names from sysfs. */
1695 "/sys/class/net/%s/master/bonding/slaves", ifname);
1696 file = fopen(slaves, "r");
1702 /* Use safe format to check maximal buffer length. */
1703 MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
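		/*
		 * RTE_STR(IF_NAMESIZE) stringifies the buffer size, so with
		 * IF_NAMESIZE == 16 the format below expands to "%16s",
		 * bounding the read to the ifname buffer.
		 */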
1704 while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1705 char tmp_str[IF_NAMESIZE + 32];
1706 struct rte_pci_addr pci_addr;
1707 struct mlx5_switch_info info;
1709 /* Process slave interface names in the loop. */
1710 snprintf(tmp_str, sizeof(tmp_str),
1711 "/sys/class/net/%s", ifname);
1712 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
1713 DRV_LOG(WARNING, "can not get PCI address"
1714 " for netdev \"%s\"", ifname);
1717 if (pci_dev->addr.domain != pci_addr.domain ||
1718 pci_dev->addr.bus != pci_addr.bus ||
1719 pci_dev->addr.devid != pci_addr.devid ||
1720 pci_dev->addr.function != pci_addr.function)
1722 /* Slave interface PCI address match found. */
1724 snprintf(tmp_str, sizeof(tmp_str),
1725 "/sys/class/net/%s/phys_port_name", ifname);
1726 file = fopen(tmp_str, "rb");
1729 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1730 if (fscanf(file, "%32s", tmp_str) == 1)
1731 mlx5_translate_port_name(tmp_str, &info);
1732 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
1733 info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1734 pf = info.port_name;
1743 * DPDK callback to register a PCI device.
1745 * This function spawns Ethernet devices out of a given PCI device.
1747 * @param[in] pci_drv
1748 * PCI driver structure (mlx5_driver).
1749 * @param[in] pci_dev
1750 * PCI device information.
1753 * 0 on success, a negative errno value otherwise and rte_errno is set.
1756 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1757 struct rte_pci_device *pci_dev)
1759 struct ibv_device **ibv_list;
1761 	 * Number of found IB devices matching the requested PCI BDF.
1762 * nd != 1 means there are multiple IB devices over the same
1763 * PCI device and we have representors and master.
1765 unsigned int nd = 0;
1767 	 * Number of found IB device ports. nd = 1 and np = 1..n means
1768 	 * we have a single multiport IB device, and there may be
1769 	 * representors attached to some of the found ports.
1771 unsigned int np = 0;
1773 	 * Number of DPDK Ethernet devices to spawn - either over
1774 	 * multiple IB devices or multiple ports of a single IB device.
1775 	 * Actually this is the number of iterations to spawn.
1777 unsigned int ns = 0;
1780 * < 0 - no bonding device (single one)
1781 * >= 0 - bonding device (value is slave PF index)
1784 struct mlx5_dev_spawn_data *list = NULL;
1785 struct mlx5_dev_config dev_config;
1786 unsigned int dev_config_vf;
1789 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1790 mlx5_pmd_socket_init();
1791 ret = mlx5_init_once();
1793 DRV_LOG(ERR, "unable to init PMD global data: %s",
1794 strerror(rte_errno));
1798 ibv_list = mlx5_glue->get_device_list(&ret);
1800 rte_errno = errno ? errno : ENOSYS;
1801 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1805 * First scan the list of all Infiniband devices to find
1806 	 * matching ones, gathering them into the list.
1808 struct ibv_device *ibv_match[ret + 1];
1809 int nl_route = mlx5_nl_init(NETLINK_ROUTE);
1810 int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
1814 struct rte_pci_addr pci_addr;
1816 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
1817 bd = mlx5_device_bond_pci_match
1818 (ibv_list[ret], pci_dev, nl_rdma);
1821 		 * Bonding device detected. Only one match is allowed;
1822 		 * bonding is supported over a multi-port IB device, and
1823 		 * there should be no matches on representor PCI
1824 		 * functions or non VF LAG bonding devices with the
1825 		 * specified address.
1829 				"multiple PCI match on bonding device "
1830 "\"%s\" found", ibv_list[ret]->name);
1835 DRV_LOG(INFO, "PCI information matches for"
1836 " slave %d bonding device \"%s\"",
1837 bd, ibv_list[ret]->name);
1838 ibv_match[nd++] = ibv_list[ret];
1841 if (mlx5_dev_to_pci_addr
1842 (ibv_list[ret]->ibdev_path, &pci_addr))
1844 if (pci_dev->addr.domain != pci_addr.domain ||
1845 pci_dev->addr.bus != pci_addr.bus ||
1846 pci_dev->addr.devid != pci_addr.devid ||
1847 pci_dev->addr.function != pci_addr.function)
1849 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
1850 ibv_list[ret]->name);
1851 ibv_match[nd++] = ibv_list[ret];
1853 ibv_match[nd] = NULL;
1855 /* No device matches, just complain and bail out. */
1857 "no Verbs device matches PCI device " PCI_PRI_FMT ","
1858 " are kernel drivers loaded?",
1859 pci_dev->addr.domain, pci_dev->addr.bus,
1860 pci_dev->addr.devid, pci_dev->addr.function);
1867 		 * The single matching device found may have multiple ports.
1868 		 * Each port may be a representor, so we have to check the
1869 		 * port number and check for the existence of representors.
1872 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
1874 DRV_LOG(WARNING, "can not get IB device \"%s\""
1875 " ports number", ibv_match[0]->name);
1876 if (bd >= 0 && !np) {
1877 DRV_LOG(ERR, "can not get ports"
1878 " for bonding device");
1884 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
1887 * This may happen if there is VF LAG kernel support and
1888 		 * the application is compiled with an older rdma_core library.
1891 "No kernel/verbs support for VF LAG bonding found.");
1892 rte_errno = ENOTSUP;
1898 * Now we can determine the maximal
1899 	 * number of devices to be spawned.
1901 list = mlx5_malloc(MLX5_MEM_ZERO,
1902 sizeof(struct mlx5_dev_spawn_data) *
1904 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1906 DRV_LOG(ERR, "spawn data array allocation failure");
1911 if (bd >= 0 || np > 1) {
1913 		 * A single IB device with multiple ports was found;
1914 		 * it may be the E-Switch master device with representors.
1915 * We have to perform identification through the ports.
1917 MLX5_ASSERT(nl_rdma >= 0);
1918 MLX5_ASSERT(ns == 0);
1919 MLX5_ASSERT(nd == 1);
1921 for (i = 1; i <= np; ++i) {
1922 list[ns].max_port = np;
1923 list[ns].phys_port = i;
1924 list[ns].phys_dev = ibv_match[0];
1925 list[ns].eth_dev = NULL;
1926 list[ns].pci_dev = pci_dev;
1927 list[ns].pf_bond = bd;
1928 list[ns].ifindex = mlx5_nl_ifindex
1930 mlx5_os_get_dev_device_name
1931 (list[ns].phys_dev), i);
1932 if (!list[ns].ifindex) {
1934 * No network interface index found for the
1935 * specified port, it means there is no
1936 * representor on this port. It's OK,
1937 * there can be disabled ports, for example
1938 * if sriov_numvfs < sriov_totalvfs.
1944 ret = mlx5_nl_switch_info
1948 if (ret || (!list[ns].info.representor &&
1949 !list[ns].info.master)) {
1951 * We failed to recognize representors with
1952 * Netlink, let's try to perform the task
1955 ret = mlx5_sysfs_switch_info
1959 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
1960 if (!ret && bd >= 0) {
1961 switch (list[ns].info.name_type) {
1962 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
1963 if (list[ns].info.port_name == bd)
1966 case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
1968 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
1969 if (list[ns].info.pf_num == bd)
1978 if (!ret && (list[ns].info.representor ^
1979 list[ns].info.master))
1984 "unable to recognize master/representors"
1985 " on the IB device with multiple ports");
1992 * The existence of several matching entries (nd > 1) means
1993 		 * port representors have been instantiated. Neither existing
1994 		 * Verbs calls nor sysfs entries can tell them apart; this can
1995 		 * only be done through Netlink calls, assuming kernel drivers
1996 		 * are recent enough to support them.
1998 * In the event of identification failure through Netlink,
1999 * try again through sysfs, then:
2001 * 1. A single IB device matches (nd == 1) with single
2002 * port (np=0/1) and is not a representor, assume
2003 * no switch support.
2005 * 2. Otherwise no safe assumptions can be made;
2006 * complain louder and bail out.
2008 for (i = 0; i != nd; ++i) {
2009 memset(&list[ns].info, 0, sizeof(list[ns].info));
2010 list[ns].max_port = 1;
2011 list[ns].phys_port = 1;
2012 list[ns].phys_dev = ibv_match[i];
2013 list[ns].eth_dev = NULL;
2014 list[ns].pci_dev = pci_dev;
2015 list[ns].pf_bond = -1;
2016 list[ns].ifindex = 0;
2018 list[ns].ifindex = mlx5_nl_ifindex
2020 mlx5_os_get_dev_device_name
2021 (list[ns].phys_dev), 1);
2022 if (!list[ns].ifindex) {
2023 char ifname[IF_NAMESIZE];
2026 				 * Netlink failed, which may happen with an old
2027 				 * ib_core kernel driver (before 4.16).
2028 				 * We can assume there is an old driver because
2029 				 * here we are processing single-port IB
2030 				 * devices. Let's try sysfs to retrieve
2031 				 * the ifindex. The method works for the
2032 				 * master device only.
2036 					 * Multiple devices found; assume
2037 					 * representors, we cannot distinguish
2038 					 * master/representor nor retrieve the
2039 					 * ifindex via sysfs.
2043 ret = mlx5_get_ifname_sysfs
2044 (ibv_match[i]->ibdev_path, ifname);
2047 if_nametoindex(ifname);
2048 if (!list[ns].ifindex) {
2050 * No network interface index found
2051 * for the specified device, it means
2052 					 * it is neither a representor
2060 ret = mlx5_nl_switch_info
2064 if (ret || (!list[ns].info.representor &&
2065 !list[ns].info.master)) {
2067 * We failed to recognize representors with
2068 * Netlink, let's try to perform the task
2071 ret = mlx5_sysfs_switch_info
2075 if (!ret && (list[ns].info.representor ^
2076 list[ns].info.master)) {
2078 } else if ((nd == 1) &&
2079 !list[ns].info.representor &&
2080 !list[ns].info.master) {
2082 * Single IB device with
2083 					 * one physical port and an
2084 					 * attached network device.
2085 					 * Maybe SRIOV is not enabled
2086 					 * or there are no representors.
2088 DRV_LOG(INFO, "no E-Switch support detected");
2095 "unable to recognize master/representors"
2096 " on the multiple IB devices");
2104 * Sort the list to probe devices in natural order for the user's convenience
2105 * (i.e. master first, then representors from lowest to highest ID).
2107 qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2108 /* Device specific configuration. */
2109 switch (pci_dev->id.device_id) {
2110 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2111 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2112 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2113 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2114 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2115 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2116 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
2123 for (i = 0; i != ns; ++i) {
2126 /* Default configuration. */
2127 memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
2128 dev_config.vf = dev_config_vf;
2129 dev_config.mps = MLX5_ARG_UNSET;
2130 dev_config.dbnc = MLX5_ARG_UNSET;
2131 dev_config.rx_vec_en = 1;
2132 dev_config.txq_inline_max = MLX5_ARG_UNSET;
2133 dev_config.txq_inline_min = MLX5_ARG_UNSET;
2134 dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
2135 dev_config.txqs_inline = MLX5_ARG_UNSET;
2136 dev_config.vf_nl_en = 1;
2137 dev_config.mr_ext_memseg_en = 1;
2138 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2139 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2140 dev_config.dv_esw_en = 1;
2141 dev_config.dv_flow_en = 1;
2142 dev_config.decap_en = 1;
2143 dev_config.log_hp_size = MLX5_ARG_UNSET;
2144 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
2147 if (!list[i].eth_dev) {
2148 if (rte_errno != EBUSY && rte_errno != EEXIST)
2150 /* Device is disabled or already spawned. Ignore it. */
2153 restore = list[i].eth_dev->data->dev_flags;
2154 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2155 /* Restore non-PCI flags cleared by the above call. */
2156 list[i].eth_dev->data->dev_flags |= restore;
2157 rte_eth_dev_probing_finish(list[i].eth_dev);
2161 "probe of PCI device " PCI_PRI_FMT " aborted after"
2162 " encountering an error: %s",
2163 pci_dev->addr.domain, pci_dev->addr.bus,
2164 pci_dev->addr.devid, pci_dev->addr.function,
2165 strerror(rte_errno));
2169 if (!list[i].eth_dev)
2171 mlx5_dev_close(list[i].eth_dev);
2172 /* mac_addrs must not be freed because it is part of dev_private */
2173 list[i].eth_dev->data->mac_addrs = NULL;
2174 claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2176 /* Restore original error. */
2183 * Do the routine cleanup:
2184 * - close opened Netlink sockets
2185 * - free allocated spawn data array
2186 * - free the Infiniband device list
2194 MLX5_ASSERT(ibv_list);
2195 mlx5_glue->free_device_list(ibv_list);
2200 mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
2205 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2206 /* Save the current value of the environment variable so it can be restored later. */
2207 env = getenv(MLX5_SHUT_UP_BF);
2208 value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
2209 if (config->dbnc == MLX5_ARG_UNSET)
2210 setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
2212 setenv(MLX5_SHUT_UP_BF,
2213 config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
2218 mlx5_restore_doorbell_mapping_env(int value)
2220 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2221 /* Restore the original environment variable state. */
2222 if (value == MLX5_ARG_UNSET)
2223 unsetenv(MLX5_SHUT_UP_BF);
2225 setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
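/*
 * Illustrative sketch (not part of the driver): the intended pairing of the
 * two helpers above. The value saved by mlx5_config_doorbell_mapping_env()
 * must be handed back to mlx5_restore_doorbell_mapping_env() once all device
 * creation attempts are finished, exactly as mlx5_os_open_device() does
 * below. The function and variable names here are hypothetical.
 */
static void __rte_unused
example_doorbell_env_pairing(const struct mlx5_dev_config *config,
			     struct ibv_device *ibv_dev)
{
	/* Save the user's MLX5_SHUT_UP_BF value and set the wanted one. */
	int saved = mlx5_config_doorbell_mapping_env(config);
	/* rdma-core samples the variable while creating the device. */
	struct ibv_context *ctx = mlx5_glue->open_device(ibv_dev);

	/* The variable is no longer needed; restore the user's value. */
	mlx5_restore_doorbell_mapping_env(saved);
	if (ctx != NULL)
		claim_zero(mlx5_glue->close_device(ctx));
}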
2229 * Extract the PD object number (pdn) using the DV API.
2232 * Pointer to the verbs PD object.
2234 * Pointer to the PD object number variable.
2237 * 0 on success, error value otherwise.
2240 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
2242 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2243 struct mlx5dv_obj obj;
2244 struct mlx5dv_pd pd_info;
2248 obj.pd.out = &pd_info;
2249 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
2251 DRV_LOG(DEBUG, "Failed to get PD object info");
2260 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
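/*
 * Illustrative sketch (not part of the driver): how a caller might use
 * mlx5_os_get_pdn() after allocating a protection domain with Verbs.
 * The helper name is hypothetical; error handling is minimal.
 */
static int __rte_unused
example_query_pdn(struct ibv_context *ctx)
{
	struct ibv_pd *pd = mlx5_glue->alloc_pd(ctx);
	uint32_t pdn = 0;
	int ret;

	if (pd == NULL)
		return errno ? errno : ENOMEM;
	/* Query the PD object number through the DV helper above. */
	ret = mlx5_os_get_pdn(pd, &pdn);
	if (ret == 0)
		DRV_LOG(DEBUG, "PD object number is %u", pdn);
	claim_zero(mlx5_glue->dealloc_pd(pd));
	return ret;
}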
2264 * Open an IB device.
2266 * This function calls the Linux glue APIs to open a device.
2269 * Pointer to the IB device attributes (name, port, etc).
2270 * @param[in] config
2271 * Pointer to device configuration structure.
2273 * Pointer to shared context structure.
2276 * 0 on success, a positive error value otherwise.
2279 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
2280 const struct mlx5_dev_config *config,
2281 struct mlx5_dev_ctx_shared *sh)
2286 sh->numa_node = spawn->pci_dev->device.numa_node;
2287 pthread_mutex_init(&sh->txpp.mutex, NULL);
2289 * Configure environment variable "MLX5_SHUT_UP_BF"
2290 * before the device creation. The rdma-core library
2291 * checks the variable at device creation and
2292 * stores the result internally.
2294 dbmap_env = mlx5_config_doorbell_mapping_env(config);
2295 /* Try to open IB device with DV first, then usual Verbs. */
2297 sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
2300 DRV_LOG(DEBUG, "DevX is supported");
2301 /* The device is created, no need for environment. */
2302 mlx5_restore_doorbell_mapping_env(dbmap_env);
2304 /* The environment variable is still configured. */
2305 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
2306 err = errno ? errno : ENODEV;
2308 * The environment variable is not needed anymore,
2309 * all device creation attempts are completed.
2311 mlx5_restore_doorbell_mapping_env(dbmap_env);
2314 DRV_LOG(DEBUG, "DevX is NOT supported");
2317 if (!err && sh->ctx) {
2318 /* Hint libmlx5 to use PMD allocator for data plane resources */
2319 mlx5_glue->dv_set_context_attr(sh->ctx,
2320 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2321 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
2322 .alloc = &mlx5_alloc_verbs_buf,
2323 .free = &mlx5_free_verbs_buf,
2331 * Install shared asynchronous device events handler.
2332 * This function is implemented to support event sharing
2333 * between multiple ports of a single IB device.
2336 * Pointer to mlx5_dev_ctx_shared object.
2339 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
2344 sh->intr_handle.fd = -1;
2345 flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
2346 ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
2347 F_SETFL, flags | O_NONBLOCK);
2349 DRV_LOG(INFO, "failed to change file descriptor async event"
2352 sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
2353 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
2354 if (rte_intr_callback_register(&sh->intr_handle,
2355 mlx5_dev_interrupt_handler, sh)) {
2356 DRV_LOG(INFO, "Failed to install the shared interrupt handler.");
2357 sh->intr_handle.fd = -1;
2361 #ifdef HAVE_IBV_DEVX_ASYNC
2362 sh->intr_handle_devx.fd = -1;
2364 (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
2365 struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
2367 DRV_LOG(INFO, "failed to allocate devx_comp.");
2370 flags = fcntl(devx_comp->fd, F_GETFL);
2371 ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
2373 DRV_LOG(INFO, "failed to change file descriptor"
2377 sh->intr_handle_devx.fd = devx_comp->fd;
2378 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
2379 if (rte_intr_callback_register(&sh->intr_handle_devx,
2380 mlx5_dev_interrupt_handler_devx, sh)) {
2381 DRV_LOG(INFO, "Failed to install the devx shared"
2383 sh->intr_handle_devx.fd = -1;
2385 #endif /* HAVE_IBV_DEVX_ASYNC */
2390 * Uninstall shared asynchronous device events handler.
2391 * This function is implemented to support event sharing
2392 * between multiple ports of a single IB device.
2395 * Pointer to mlx5_dev_ctx_shared object.
2398 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
2400 if (sh->intr_handle.fd >= 0)
2401 mlx5_intr_callback_unregister(&sh->intr_handle,
2402 mlx5_dev_interrupt_handler, sh);
2403 #ifdef HAVE_IBV_DEVX_ASYNC
2404 if (sh->intr_handle_devx.fd >= 0)
2405 rte_intr_callback_unregister(&sh->intr_handle_devx,
2406 mlx5_dev_interrupt_handler_devx, sh);
2408 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
2413 * Read statistics by a named counter.
2416 * Pointer to the private device data structure.
2417 * @param[in] ctr_name
2418 * Pointer to the name of the statistic counter to read.
2420 * Pointer to the read statistic value.
2422 * 0 on success and stat is valid, 1 if the value could not be read
2427 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2433 MKSTR(path, "%s/ports/%d/hw_counters/%s",
2434 priv->sh->ibdev_path,
2437 fd = open(path, O_RDONLY);
2439 * in switchdev the file location is not per port
2440 * but rather in <ibdev_path>/hw_counters/<file_name>.
2443 MKSTR(path1, "%s/hw_counters/%s",
2444 priv->sh->ibdev_path,
2446 fd = open(path1, O_RDONLY);
2449 char buf[21] = {'\0'};
2450 ssize_t n = read(fd, buf, sizeof(buf));
2454 *stat = strtoull(buf, NULL, 10);
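/*
 * Illustrative sketch (not part of the driver): reading a named hardware
 * counter such as "out_of_buffer" through mlx5_os_read_dev_stat(). The
 * helper name is hypothetical and the counter name is only an example of
 * the files found under hw_counters in sysfs.
 */
static void __rte_unused
example_log_out_of_buffer(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t value = 0;

	if (mlx5_os_read_dev_stat(priv, "out_of_buffer", &value) == 0)
		DRV_LOG(DEBUG, "port %u out_of_buffer counter: %llu",
			dev->data->port_id, (unsigned long long)value);
}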
2464 * Set the reg_mr and dereg_mr callbacks.
2466 * @param[out] reg_mr_cb
2467 * Pointer to the reg_mr function.
2468 * @param[out] dereg_mr_cb
2469 * Pointer to the dereg_mr function.
2473 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
2474 mlx5_dereg_mr_t *dereg_mr_cb)
2476 *reg_mr_cb = mlx5_verbs_ops.reg_mr;
2477 *dereg_mr_cb = mlx5_verbs_ops.dereg_mr;
2481 * Remove a MAC address from the device
2484 * Pointer to Ethernet device structure.
2486 * MAC address index.
2489 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2491 struct mlx5_priv *priv = dev->data->dev_private;
2492 const int vf = priv->config.vf;
2495 mlx5_nl_mac_addr_remove(priv->nl_socket_route,
2496 mlx5_ifindex(dev), priv->mac_own,
2497 &dev->data->mac_addrs[index], index);
2501 * Add a MAC address to the device
2504 * Pointer to Ethernet device structure.
2506 * MAC address to register.
2508 * MAC address index.
2511 * 0 on success, a negative errno value otherwise
2514 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2517 struct mlx5_priv *priv = dev->data->dev_private;
2518 const int vf = priv->config.vf;
2522 ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
2523 mlx5_ifindex(dev), priv->mac_own,
2529 * Modify a VF MAC address
2532 * Pointer to device private data.
2534 * MAC address to modify into.
2536 * Net device interface index
2541 * 0 on success, a negative errno value otherwise
2544 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
2545 unsigned int iface_idx,
2546 struct rte_ether_addr *mac_addr,
2549 return mlx5_nl_vf_mac_addr_modify
2550 (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
2554 * Set device promiscuous mode
2557 * Pointer to Ethernet device structure.
2559 * 0 - promiscuous is disabled, otherwise - enabled
2562 * 0 on success, a negative error value otherwise
2565 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
2567 struct mlx5_priv *priv = dev->data->dev_private;
2569 return mlx5_nl_promisc(priv->nl_socket_route,
2570 mlx5_ifindex(dev), !!enable);
2574 * Set device allmulticast mode
2577 * Pointer to Ethernet device structure.
2579 * 0 - all multicast is disabled, otherwise - enabled
2582 * 0 on success, a negative error value otherwise
2585 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
2587 struct mlx5_priv *priv = dev->data->dev_private;
2589 return mlx5_nl_allmulti(priv->nl_socket_route,
2590 mlx5_ifindex(dev), !!enable);
2594 * Flush device MAC addresses
2597 * Pointer to Ethernet device structure.
2601 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
2603 struct mlx5_priv *priv = dev->data->dev_private;
2605 mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
2606 dev->data->mac_addrs,
2607 MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
2610 const struct eth_dev_ops mlx5_os_dev_ops = {
2611 .dev_configure = mlx5_dev_configure,
2612 .dev_start = mlx5_dev_start,
2613 .dev_stop = mlx5_dev_stop,
2614 .dev_set_link_down = mlx5_set_link_down,
2615 .dev_set_link_up = mlx5_set_link_up,
2616 .dev_close = mlx5_dev_close,
2617 .promiscuous_enable = mlx5_promiscuous_enable,
2618 .promiscuous_disable = mlx5_promiscuous_disable,
2619 .allmulticast_enable = mlx5_allmulticast_enable,
2620 .allmulticast_disable = mlx5_allmulticast_disable,
2621 .link_update = mlx5_link_update,
2622 .stats_get = mlx5_stats_get,
2623 .stats_reset = mlx5_stats_reset,
2624 .xstats_get = mlx5_xstats_get,
2625 .xstats_reset = mlx5_xstats_reset,
2626 .xstats_get_names = mlx5_xstats_get_names,
2627 .fw_version_get = mlx5_fw_version_get,
2628 .dev_infos_get = mlx5_dev_infos_get,
2629 .read_clock = mlx5_txpp_read_clock,
2630 .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2631 .vlan_filter_set = mlx5_vlan_filter_set,
2632 .rx_queue_setup = mlx5_rx_queue_setup,
2633 .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2634 .tx_queue_setup = mlx5_tx_queue_setup,
2635 .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2636 .rx_queue_release = mlx5_rx_queue_release,
2637 .tx_queue_release = mlx5_tx_queue_release,
2638 .rx_queue_start = mlx5_rx_queue_start,
2639 .rx_queue_stop = mlx5_rx_queue_stop,
2640 .tx_queue_start = mlx5_tx_queue_start,
2641 .tx_queue_stop = mlx5_tx_queue_stop,
2642 .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2643 .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2644 .mac_addr_remove = mlx5_mac_addr_remove,
2645 .mac_addr_add = mlx5_mac_addr_add,
2646 .mac_addr_set = mlx5_mac_addr_set,
2647 .set_mc_addr_list = mlx5_set_mc_addr_list,
2648 .mtu_set = mlx5_dev_set_mtu,
2649 .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2650 .vlan_offload_set = mlx5_vlan_offload_set,
2651 .reta_update = mlx5_dev_rss_reta_update,
2652 .reta_query = mlx5_dev_rss_reta_query,
2653 .rss_hash_update = mlx5_rss_hash_update,
2654 .rss_hash_conf_get = mlx5_rss_hash_conf_get,
2655 .filter_ctrl = mlx5_dev_filter_ctrl,
2656 .rxq_info_get = mlx5_rxq_info_get,
2657 .txq_info_get = mlx5_txq_info_get,
2658 .rx_burst_mode_get = mlx5_rx_burst_mode_get,
2659 .tx_burst_mode_get = mlx5_tx_burst_mode_get,
2660 .rx_queue_intr_enable = mlx5_rx_intr_enable,
2661 .rx_queue_intr_disable = mlx5_rx_intr_disable,
2662 .is_removed = mlx5_is_removed,
2663 .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
2664 .get_module_info = mlx5_get_module_info,
2665 .get_module_eeprom = mlx5_get_module_eeprom,
2666 .hairpin_cap_get = mlx5_hairpin_cap_get,
2667 .mtr_ops_get = mlx5_flow_meter_ops_get,
2668 .hairpin_bind = mlx5_hairpin_bind,
2669 .hairpin_unbind = mlx5_hairpin_unbind,
2670 .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2671 .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2672 .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2673 .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
2676 /* Available operations from secondary process. */
2677 const struct eth_dev_ops mlx5_os_dev_sec_ops = {
2678 .stats_get = mlx5_stats_get,
2679 .stats_reset = mlx5_stats_reset,
2680 .xstats_get = mlx5_xstats_get,
2681 .xstats_reset = mlx5_xstats_reset,
2682 .xstats_get_names = mlx5_xstats_get_names,
2683 .fw_version_get = mlx5_fw_version_get,
2684 .dev_infos_get = mlx5_dev_infos_get,
2685 .read_clock = mlx5_txpp_read_clock,
2686 .rx_queue_start = mlx5_rx_queue_start,
2687 .rx_queue_stop = mlx5_rx_queue_stop,
2688 .tx_queue_start = mlx5_tx_queue_start,
2689 .tx_queue_stop = mlx5_tx_queue_stop,
2690 .rxq_info_get = mlx5_rxq_info_get,
2691 .txq_info_get = mlx5_txq_info_get,
2692 .rx_burst_mode_get = mlx5_rx_burst_mode_get,
2693 .tx_burst_mode_get = mlx5_tx_burst_mode_get,
2694 .get_module_info = mlx5_get_module_info,
2695 .get_module_eeprom = mlx5_get_module_eeprom,
2698 /* Available operations in flow isolated mode. */
2699 const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
2700 .dev_configure = mlx5_dev_configure,
2701 .dev_start = mlx5_dev_start,
2702 .dev_stop = mlx5_dev_stop,
2703 .dev_set_link_down = mlx5_set_link_down,
2704 .dev_set_link_up = mlx5_set_link_up,
2705 .dev_close = mlx5_dev_close,
2706 .promiscuous_enable = mlx5_promiscuous_enable,
2707 .promiscuous_disable = mlx5_promiscuous_disable,
2708 .allmulticast_enable = mlx5_allmulticast_enable,
2709 .allmulticast_disable = mlx5_allmulticast_disable,
2710 .link_update = mlx5_link_update,
2711 .stats_get = mlx5_stats_get,
2712 .stats_reset = mlx5_stats_reset,
2713 .xstats_get = mlx5_xstats_get,
2714 .xstats_reset = mlx5_xstats_reset,
2715 .xstats_get_names = mlx5_xstats_get_names,
2716 .fw_version_get = mlx5_fw_version_get,
2717 .dev_infos_get = mlx5_dev_infos_get,
2718 .read_clock = mlx5_txpp_read_clock,
2719 .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
2720 .vlan_filter_set = mlx5_vlan_filter_set,
2721 .rx_queue_setup = mlx5_rx_queue_setup,
2722 .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
2723 .tx_queue_setup = mlx5_tx_queue_setup,
2724 .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
2725 .rx_queue_release = mlx5_rx_queue_release,
2726 .tx_queue_release = mlx5_tx_queue_release,
2727 .rx_queue_start = mlx5_rx_queue_start,
2728 .rx_queue_stop = mlx5_rx_queue_stop,
2729 .tx_queue_start = mlx5_tx_queue_start,
2730 .tx_queue_stop = mlx5_tx_queue_stop,
2731 .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
2732 .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
2733 .mac_addr_remove = mlx5_mac_addr_remove,
2734 .mac_addr_add = mlx5_mac_addr_add,
2735 .mac_addr_set = mlx5_mac_addr_set,
2736 .set_mc_addr_list = mlx5_set_mc_addr_list,
2737 .mtu_set = mlx5_dev_set_mtu,
2738 .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
2739 .vlan_offload_set = mlx5_vlan_offload_set,
2740 .filter_ctrl = mlx5_dev_filter_ctrl,
2741 .rxq_info_get = mlx5_rxq_info_get,
2742 .txq_info_get = mlx5_txq_info_get,
2743 .rx_burst_mode_get = mlx5_rx_burst_mode_get,
2744 .tx_burst_mode_get = mlx5_tx_burst_mode_get,
2745 .rx_queue_intr_enable = mlx5_rx_intr_enable,
2746 .rx_queue_intr_disable = mlx5_rx_intr_disable,
2747 .is_removed = mlx5_is_removed,
2748 .get_module_info = mlx5_get_module_info,
2749 .get_module_eeprom = mlx5_get_module_eeprom,
2750 .hairpin_cap_get = mlx5_hairpin_cap_get,
2751 .mtr_ops_get = mlx5_flow_meter_ops_get,
2752 .hairpin_bind = mlx5_hairpin_bind,
2753 .hairpin_unbind = mlx5_hairpin_unbind,
2754 .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
2755 .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
2756 .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
2757 .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,