1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
13 #include <linux/rtnetlink.h>
14 #include <linux/sockios.h>
15 #include <linux/ethtool.h>
19 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
21 #pragma GCC diagnostic ignored "-Wpedantic"
23 #include <infiniband/verbs.h>
25 #pragma GCC diagnostic error "-Wpedantic"
28 #include <rte_malloc.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_ethdev_pci.h>
32 #include <rte_bus_pci.h>
33 #include <rte_common.h>
34 #include <rte_kvargs.h>
35 #include <rte_rwlock.h>
36 #include <rte_spinlock.h>
37 #include <rte_string_fns.h>
38 #include <rte_alarm.h>
39 #include <rte_eal_paging.h>
41 #include <mlx5_glue.h>
42 #include <mlx5_devx_cmds.h>
43 #include <mlx5_common.h>
44 #include <mlx5_common_mp.h>
45 #include <mlx5_common_mr.h>
46 #include <mlx5_malloc.h>
48 #include "mlx5_defs.h"
50 #include "mlx5_common_os.h"
51 #include "mlx5_utils.h"
52 #include "mlx5_rxtx.h"
53 #include "mlx5_autoconf.h"
55 #include "mlx5_flow.h"
56 #include "rte_pmd_mlx5.h"
57 #include "mlx5_verbs.h"
59 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
61 #ifndef HAVE_IBV_MLX5_MOD_MPW
62 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
63 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
66 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
67 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
70 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
72 /* Spinlock for mlx5_shared_data allocation. */
73 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
75 /* Process local data for secondary processes. */
76 static struct mlx5_local_data mlx5_local_data;
79 * Get mlx5 device attributes. The glue function query_device_ex() is called
80 * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5
81 * device attributes are then filled in from that out parameter.
84 * Pointer to ibv context.
87 * Pointer to mlx5 device attributes.
90 * 0 on success, a non-zero error number otherwise
93 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
96 struct ibv_device_attr_ex attr_ex;
97 memset(device_attr, 0, sizeof(*device_attr));
98 err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
102 device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
103 device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
104 device_attr->max_sge = attr_ex.orig_attr.max_sge;
105 device_attr->max_cq = attr_ex.orig_attr.max_cq;
106 device_attr->max_qp = attr_ex.orig_attr.max_qp;
107 device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
108 device_attr->max_rwq_indirection_table_size =
109 attr_ex.rss_caps.max_rwq_indirection_table_size;
110 device_attr->max_tso = attr_ex.tso_caps.max_tso;
111 device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
113 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
114 err = mlx5_glue->dv_query_device(ctx, &dv_attr);
118 device_attr->flags = dv_attr.flags;
119 device_attr->comp_mask = dv_attr.comp_mask;
120 #ifdef HAVE_IBV_MLX5_MOD_SWP
121 device_attr->sw_parsing_offloads =
122 dv_attr.sw_parsing_caps.sw_parsing_offloads;
124 device_attr->min_single_stride_log_num_of_bytes =
125 dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
126 device_attr->max_single_stride_log_num_of_bytes =
127 dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
128 device_attr->min_single_wqe_log_num_of_strides =
129 dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
130 device_attr->max_single_wqe_log_num_of_strides =
131 dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
132 device_attr->stride_supported_qpts =
133 dv_attr.striding_rq_caps.supported_qpts;
134 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
135 device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
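/*
 * Illustrative usage sketch (not part of the driver flow; 'ctx' is an assumed
 * Verbs context obtained from mlx5_glue->dv_open_device()/open_device()):
 *
 *	struct mlx5_dev_attr dev_attr;
 *	int ret = mlx5_os_get_dev_attr(ctx, &dev_attr);
 *
 *	if (ret)
 *		DRV_LOG(ERR, "device attributes query failed: %s",
 *			strerror(ret));
 *	else
 *		DRV_LOG(DEBUG, "max_qp_wr=%d max_sge=%d",
 *			dev_attr.max_qp_wr, dev_attr.max_sge);
 */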
142 * Verbs callback to allocate memory. This function should allocate space of
143 * the provided size from memory residing inside a huge page.
144 * Please note that all allocations must respect the alignment required by
145 * libmlx5 (i.e. currently rte_mem_page_size()).
148 * The size in bytes of the memory to allocate.
150 * A pointer to the callback data.
153 * Allocated buffer, NULL otherwise and rte_errno is set.
156 mlx5_alloc_verbs_buf(size_t size, void *data)
158 struct mlx5_priv *priv = data;
160 unsigned int socket = SOCKET_ID_ANY;
161 size_t alignment = rte_mem_page_size();
162 if (alignment == (size_t)-1) {
163 DRV_LOG(ERR, "Failed to get mem page size");
168 if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
169 const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
171 socket = ctrl->socket;
172 } else if (priv->verbs_alloc_ctx.type ==
173 MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
174 const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
176 socket = ctrl->socket;
178 MLX5_ASSERT(data != NULL);
179 ret = mlx5_malloc(0, size, alignment, socket);
186 * Verbs callback to free memory.
189 * A pointer to the memory to free.
191 * A pointer to the callback data.
194 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
196 MLX5_ASSERT(data != NULL);
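/*
 * Hedged sketch of how the two callbacks above are registered with libmlx5
 * (the same registration is performed later in mlx5_dev_spawn(); 'sh' and
 * 'priv' are assumed to be the shared context and private data):
 *
 *	struct mlx5dv_ctx_allocators alctr = {
 *		.alloc = &mlx5_alloc_verbs_buf,
 *		.free = &mlx5_free_verbs_buf,
 *		.data = priv,
 *	};
 *	mlx5_glue->dv_set_context_attr(sh->ctx,
 *				       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
 *				       (void *)&alctr);
 */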
201 * Initialize DR related data within private structure.
202 * Routine checks the reference counter and does actual
203 * resources creation/initialization only if counter is zero.
206 * Pointer to the private device data structure.
209 * Zero on success, positive error code otherwise.
212 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
214 struct mlx5_dev_ctx_shared *sh = priv->sh;
215 char s[MLX5_HLIST_NAMESIZE];
219 err = mlx5_alloc_table_hash_list(priv);
221 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse\n",
222 (void *)sh->flow_tbls);
225 /* Create tags hash list table. */
226 snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
227 sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
228 if (!sh->tag_table) {
229 DRV_LOG(ERR, "tags with hash creation failed.");
233 #ifdef HAVE_MLX5DV_DR
237 /* Shared DV/DR structures are already initialized. */
242 /* Reference counter is zero, we should initialize structures. */
243 domain = mlx5_glue->dr_create_domain(sh->ctx,
244 MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
246 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
250 sh->rx_domain = domain;
251 domain = mlx5_glue->dr_create_domain(sh->ctx,
252 MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
254 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
258 pthread_mutex_init(&sh->dv_mutex, NULL);
259 sh->tx_domain = domain;
260 #ifdef HAVE_MLX5DV_DR_ESWITCH
261 if (priv->config.dv_esw_en) {
262 domain = mlx5_glue->dr_create_domain
263 (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
265 DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
269 sh->fdb_domain = domain;
270 sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
273 if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
274 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
275 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
277 mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
279 sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
280 #endif /* HAVE_MLX5DV_DR */
285 /* Rollback the created objects. */
287 mlx5_glue->dr_destroy_domain(sh->rx_domain);
288 sh->rx_domain = NULL;
291 mlx5_glue->dr_destroy_domain(sh->tx_domain);
292 sh->tx_domain = NULL;
294 if (sh->fdb_domain) {
295 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
296 sh->fdb_domain = NULL;
298 if (sh->esw_drop_action) {
299 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
300 sh->esw_drop_action = NULL;
302 if (sh->pop_vlan_action) {
303 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
304 sh->pop_vlan_action = NULL;
307 /* Tags must be destroyed together with their flows beforehand. */
308 mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
309 sh->tag_table = NULL;
311 mlx5_free_table_hash_list(priv);
316 * Destroy DR related data within private structure.
319 * Pointer to the private device data structure.
322 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
324 struct mlx5_dev_ctx_shared *sh;
326 if (!priv->dr_shared)
331 #ifdef HAVE_MLX5DV_DR
332 MLX5_ASSERT(sh->dv_refcnt);
333 if (sh->dv_refcnt && --sh->dv_refcnt)
336 mlx5_glue->dr_destroy_domain(sh->rx_domain);
337 sh->rx_domain = NULL;
340 mlx5_glue->dr_destroy_domain(sh->tx_domain);
341 sh->tx_domain = NULL;
343 #ifdef HAVE_MLX5DV_DR_ESWITCH
344 if (sh->fdb_domain) {
345 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
346 sh->fdb_domain = NULL;
348 if (sh->esw_drop_action) {
349 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
350 sh->esw_drop_action = NULL;
353 if (sh->pop_vlan_action) {
354 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
355 sh->pop_vlan_action = NULL;
357 pthread_mutex_destroy(&sh->dv_mutex);
358 #endif /* HAVE_MLX5DV_DR */
360 /* Tags must be destroyed together with their flows beforehand. */
361 mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
362 sh->tag_table = NULL;
364 mlx5_free_table_hash_list(priv);
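/*
 * Hedged usage sketch: mlx5_alloc_shared_dr() is invoked from mlx5_dev_spawn()
 * only when config.dv_flow_en is set, and mlx5_os_free_shared_dr() undoes it
 * on the error path and on device close ('priv' is assumed):
 *
 *	if (config.dv_flow_en) {
 *		err = mlx5_alloc_shared_dr(priv);
 *		if (err)
 *			goto error;
 *	}
 *	...
 *	mlx5_os_free_shared_dr(priv);
 */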
368 * Initialize shared data between the primary and secondary processes.
370 * A memzone is reserved by the primary process and secondary processes attach to
374 * 0 on success, a negative errno value otherwise and rte_errno is set.
377 mlx5_init_shared_data(void)
379 const struct rte_memzone *mz;
382 rte_spinlock_lock(&mlx5_shared_data_lock);
383 if (mlx5_shared_data == NULL) {
384 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
385 /* Allocate shared memory. */
386 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
387 sizeof(*mlx5_shared_data),
391 "Cannot allocate mlx5 shared data");
395 mlx5_shared_data = mz->addr;
396 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
397 rte_spinlock_init(&mlx5_shared_data->lock);
399 /* Lookup allocated shared memory. */
400 mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
403 "Cannot attach mlx5 shared data");
407 mlx5_shared_data = mz->addr;
408 memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
412 rte_spinlock_unlock(&mlx5_shared_data_lock);
417 * PMD global initialization.
419 * Independent of any individual device, this function initializes global
420 * per-PMD data structures, distinguishing primary and secondary processes.
421 * Hence, it is called once per process.
424 * 0 on success, a negative errno value otherwise and rte_errno is set.
429 struct mlx5_shared_data *sd;
430 struct mlx5_local_data *ld = &mlx5_local_data;
433 if (mlx5_init_shared_data())
435 sd = mlx5_shared_data;
437 rte_spinlock_lock(&sd->lock);
438 switch (rte_eal_process_type()) {
439 case RTE_PROC_PRIMARY:
442 LIST_INIT(&sd->mem_event_cb_list);
443 rte_rwlock_init(&sd->mem_event_rwlock);
444 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
445 mlx5_mr_mem_event_cb, NULL);
446 ret = mlx5_mp_init_primary(MLX5_MP_NAME,
447 mlx5_mp_os_primary_handle);
450 sd->init_done = true;
452 case RTE_PROC_SECONDARY:
455 ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
456 mlx5_mp_os_secondary_handle);
460 ld->init_done = true;
466 rte_spinlock_unlock(&sd->lock);
471 * Spawn an Ethernet device from Verbs information.
474 * Backing DPDK device.
476 * Verbs device parameters (name, port, switch_info) to spawn.
478 * Device configuration parameters.
481 * A valid Ethernet device object on success, NULL otherwise and rte_errno
482 * is set. The following errors are defined:
484 * EBUSY: device is not supposed to be spawned.
485 * EEXIST: device is already spawned.
487 static struct rte_eth_dev *
488 mlx5_dev_spawn(struct rte_device *dpdk_dev,
489 struct mlx5_dev_spawn_data *spawn,
490 struct mlx5_dev_config config)
492 const struct mlx5_switch_info *switch_info = &spawn->info;
493 struct mlx5_dev_ctx_shared *sh = NULL;
494 struct ibv_port_attr port_attr;
495 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
496 struct rte_eth_dev *eth_dev = NULL;
497 struct mlx5_priv *priv = NULL;
499 unsigned int hw_padding = 0;
501 unsigned int cqe_comp;
502 unsigned int cqe_pad = 0;
503 unsigned int tunnel_en = 0;
504 unsigned int mpls_en = 0;
505 unsigned int swp = 0;
506 unsigned int mprq = 0;
507 unsigned int mprq_min_stride_size_n = 0;
508 unsigned int mprq_max_stride_size_n = 0;
509 unsigned int mprq_min_stride_num_n = 0;
510 unsigned int mprq_max_stride_num_n = 0;
511 struct rte_ether_addr mac;
512 char name[RTE_ETH_NAME_MAX_LEN];
513 int own_domain_id = 0;
516 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
517 struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
520 /* Determine if this port representor is supposed to be spawned. */
521 if (switch_info->representor && dpdk_dev->devargs) {
522 struct rte_eth_devargs eth_da;
524 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da);
527 DRV_LOG(ERR, "failed to process device arguments: %s",
528 strerror(rte_errno));
531 for (i = 0; i < eth_da.nb_representor_ports; ++i)
532 if (eth_da.representor_ports[i] ==
533 (uint16_t)switch_info->port_name)
535 if (i == eth_da.nb_representor_ports) {
540 /* Build device name. */
541 if (spawn->pf_bond < 0) {
543 if (!switch_info->representor)
544 strlcpy(name, dpdk_dev->name, sizeof(name));
546 snprintf(name, sizeof(name), "%s_representor_%u",
547 dpdk_dev->name, switch_info->port_name);
549 /* Bonding device. */
550 if (!switch_info->representor)
551 snprintf(name, sizeof(name), "%s_%s",
553 mlx5_os_get_dev_device_name(spawn->phys_dev));
555 snprintf(name, sizeof(name), "%s_%s_representor_%u",
557 mlx5_os_get_dev_device_name(spawn->phys_dev),
558 switch_info->port_name);
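/*
 * Naming example (illustrative): for a non-bonding PCI device named
 * "0000:08:00.0" the master port keeps the name "0000:08:00.0", while
 * representor port 2 becomes "0000:08:00.0_representor_2", following the
 * snprintf() formats above. The representor set itself is typically selected
 * with a devargs string such as "representor=[0-2]", parsed by
 * rte_eth_devargs_parse() earlier in this function.
 */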
560 /* check if the device is already spawned */
561 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
565 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
566 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
567 struct mlx5_mp_id mp_id;
569 eth_dev = rte_eth_dev_attach_secondary(name);
570 if (eth_dev == NULL) {
571 DRV_LOG(ERR, "can not attach rte ethdev");
575 eth_dev->device = dpdk_dev;
576 eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
577 err = mlx5_proc_priv_init(eth_dev);
580 mp_id.port_id = eth_dev->data->port_id;
581 strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
582 /* Receive command fd from primary process */
583 err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
586 /* Remap UAR for Tx queues. */
587 err = mlx5_tx_uar_init_secondary(eth_dev, err);
591 * Ethdev pointer is still required as input since
592 * the primary device is not accessible from the
595 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
596 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
599 mlx5_dev_close(eth_dev);
603 * Some parameters ("tx_db_nc" in particular) are needed in
604 * advance to create the dv/verbs device context. We process the
605 * devargs here to get them, and process the devargs again later
606 * to override some hardware settings.
608 err = mlx5_args(&config, dpdk_dev->devargs);
611 DRV_LOG(ERR, "failed to process device arguments: %s",
612 strerror(rte_errno));
615 mlx5_malloc_mem_select(config.sys_mem_en);
616 sh = mlx5_alloc_shared_dev_ctx(spawn, &config);
619 config.devx = sh->devx;
620 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
623 #ifdef HAVE_IBV_MLX5_MOD_SWP
624 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
627 * Multi-packet send is supported by ConnectX-4 Lx PF as well
628 * as all ConnectX-5 devices.
630 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
631 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
633 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
634 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
636 mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
637 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
638 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
639 DRV_LOG(DEBUG, "enhanced MPW is supported");
640 mps = MLX5_MPW_ENHANCED;
642 DRV_LOG(DEBUG, "MPW is supported");
646 DRV_LOG(DEBUG, "MPW isn't supported");
647 mps = MLX5_MPW_DISABLED;
649 #ifdef HAVE_IBV_MLX5_MOD_SWP
650 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
651 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
652 DRV_LOG(DEBUG, "SWP support: %u", swp);
655 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
656 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
657 struct mlx5dv_striding_rq_caps mprq_caps =
658 dv_attr.striding_rq_caps;
660 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
661 mprq_caps.min_single_stride_log_num_of_bytes);
662 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
663 mprq_caps.max_single_stride_log_num_of_bytes);
664 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
665 mprq_caps.min_single_wqe_log_num_of_strides);
666 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
667 mprq_caps.max_single_wqe_log_num_of_strides);
668 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
669 mprq_caps.supported_qpts);
670 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
672 mprq_min_stride_size_n =
673 mprq_caps.min_single_stride_log_num_of_bytes;
674 mprq_max_stride_size_n =
675 mprq_caps.max_single_stride_log_num_of_bytes;
676 mprq_min_stride_num_n =
677 mprq_caps.min_single_wqe_log_num_of_strides;
678 mprq_max_stride_num_n =
679 mprq_caps.max_single_wqe_log_num_of_strides;
682 if (RTE_CACHE_LINE_SIZE == 128 &&
683 !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
687 config.cqe_comp = cqe_comp;
688 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
689 /* Whether device supports 128B Rx CQE padding. */
690 cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
691 (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
693 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
694 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
695 tunnel_en = ((dv_attr.tunnel_offloads_caps &
696 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
697 (dv_attr.tunnel_offloads_caps &
698 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
699 (dv_attr.tunnel_offloads_caps &
700 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
702 DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
703 tunnel_en ? "" : "not ");
706 "tunnel offloading disabled due to old OFED/rdma-core version");
708 config.tunnel_en = tunnel_en;
709 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
710 mpls_en = ((dv_attr.tunnel_offloads_caps &
711 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
712 (dv_attr.tunnel_offloads_caps &
713 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
714 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
715 mpls_en ? "" : "not ");
717 DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
718 " old OFED/rdma-core version or firmware configuration");
720 config.mpls_en = mpls_en;
721 /* Check port status. */
722 err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
724 DRV_LOG(ERR, "port query failed: %s", strerror(err));
727 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
728 DRV_LOG(ERR, "port is not configured in Ethernet mode");
732 if (port_attr.state != IBV_PORT_ACTIVE)
733 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
734 mlx5_glue->port_state_str(port_attr.state),
736 /* Allocate private eth device data. */
737 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
739 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
741 DRV_LOG(ERR, "priv allocation failure");
746 priv->dev_port = spawn->phys_port;
747 priv->pci_dev = spawn->pci_dev;
748 priv->mtu = RTE_ETHER_MTU;
749 priv->mp_id.port_id = port_id;
750 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
751 /* Some internal functions rely on Netlink sockets, open them now. */
752 priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
753 priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
754 priv->representor = !!switch_info->representor;
755 priv->master = !!switch_info->master;
756 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
757 priv->vport_meta_tag = 0;
758 priv->vport_meta_mask = 0;
759 priv->pf_bond = spawn->pf_bond;
760 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
762 * The DevX port query API is implemented. E-Switch may use
763 * either vport or reg_c[0] metadata register to match on
764 * vport index. The engaged part of metadata register is
767 if (switch_info->representor || switch_info->master) {
768 devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
769 MLX5DV_DEVX_PORT_MATCH_REG_C_0;
770 err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
774 "can't query devx port %d on device %s",
776 mlx5_os_get_dev_device_name(spawn->phys_dev));
777 devx_port.comp_mask = 0;
780 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
781 priv->vport_meta_tag = devx_port.reg_c_0.value;
782 priv->vport_meta_mask = devx_port.reg_c_0.mask;
783 if (!priv->vport_meta_mask) {
784 DRV_LOG(ERR, "vport zero mask for port %d"
785 " on bonding device %s",
787 mlx5_os_get_dev_device_name
792 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
793 DRV_LOG(ERR, "invalid vport tag for port %d"
794 " on bonding device %s",
796 mlx5_os_get_dev_device_name
802 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
803 priv->vport_id = devx_port.vport_num;
804 } else if (spawn->pf_bond >= 0) {
805 DRV_LOG(ERR, "can't deduce vport index for port %d"
806 " on bonding device %s",
808 mlx5_os_get_dev_device_name(spawn->phys_dev));
812 /* Assume the vport index in the compatible (legacy) way. */
813 priv->vport_id = switch_info->representor ?
814 switch_info->port_name + 1 : -1;
818 * Kernel/rdma_core supports single E-Switch per PF configurations
819 * only and the vport_id field contains the vport index for the
820 * associated VF, which is deduced from the representor port name.
821 * For example, suppose the IB device port 10 has an attached
822 * network device eth0 whose port name attribute is pf0vf2; we can
823 * deduce the VF number as 2 and set the vport index to 3 (2+1).
824 * This assignment schema should be changed if multiple E-Switch
825 * instances per PF configurations and/or PCI subfunctions are
826 * added.
828 priv->vport_id = switch_info->representor ?
829 switch_info->port_name + 1 : -1;
831 /* representor_id field keeps the unmodified VF index. */
832 priv->representor_id = switch_info->representor ?
833 switch_info->port_name : -1;
835 * Look for sibling devices in order to reuse their switch domain
836 * if any, otherwise allocate one.
838 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
839 const struct mlx5_priv *opriv =
840 rte_eth_devices[port_id].data->dev_private;
843 opriv->sh != priv->sh ||
845 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
847 priv->domain_id = opriv->domain_id;
850 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
851 err = rte_eth_switch_domain_alloc(&priv->domain_id);
854 DRV_LOG(ERR, "unable to allocate switch domain: %s",
855 strerror(rte_errno));
860 /* Override some values set by hardware configuration. */
861 mlx5_args(&config, dpdk_dev->devargs);
862 err = mlx5_dev_check_sibling_config(priv, &config);
865 config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
866 IBV_DEVICE_RAW_IP_CSUM);
867 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
868 (config.hw_csum ? "" : "not "));
869 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
870 !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
871 DRV_LOG(DEBUG, "counters are not supported");
873 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
874 if (config.dv_flow_en) {
875 DRV_LOG(WARNING, "DV flow is not supported");
876 config.dv_flow_en = 0;
879 config.ind_table_max_size =
880 sh->device_attr.max_rwq_indirection_table_size;
882 * Remove this check once DPDK supports larger/variable
883 * indirection tables.
885 if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
886 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
887 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
888 config.ind_table_max_size);
889 config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
890 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
891 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
892 (config.hw_vlan_strip ? "" : "not "));
893 config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
894 IBV_RAW_PACKET_CAP_SCATTER_FCS);
895 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
896 hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
897 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
898 hw_padding = !!(sh->device_attr.device_cap_flags_ex &
899 IBV_DEVICE_PCI_WRITE_END_PADDING);
901 if (config.hw_padding && !hw_padding) {
902 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
903 config.hw_padding = 0;
904 } else if (config.hw_padding) {
905 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
907 config.tso = (sh->device_attr.max_tso > 0 &&
908 (sh->device_attr.tso_supported_qpts &
909 (1 << IBV_QPT_RAW_PACKET)));
911 config.tso_max_payload_sz = sh->device_attr.max_tso;
913 * MPW is disabled by default, while the Enhanced MPW is enabled
916 if (config.mps == MLX5_ARG_UNSET)
917 config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
920 config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
921 DRV_LOG(INFO, "%sMPS is %s",
922 config.mps == MLX5_MPW_ENHANCED ? "enhanced " :
923 config.mps == MLX5_MPW ? "legacy " : "",
924 config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
925 if (config.cqe_comp && !cqe_comp) {
926 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
929 if (config.cqe_pad && !cqe_pad) {
930 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
932 } else if (config.cqe_pad) {
933 DRV_LOG(INFO, "Rx CQE padding is enabled");
936 priv->counter_fallback = 0;
937 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
942 if (!config.hca_attr.flow_counters_dump)
943 priv->counter_fallback = 1;
944 #ifndef HAVE_IBV_DEVX_ASYNC
945 priv->counter_fallback = 1;
947 if (priv->counter_fallback)
948 DRV_LOG(INFO, "Use fall-back DV counter management");
949 /* Check for LRO support. */
950 if (config.dest_tir && config.hca_attr.lro_cap &&
952 /* TBD check tunnel lro caps. */
953 config.lro.supported = config.hca_attr.lro_cap;
954 DRV_LOG(DEBUG, "Device supports LRO");
956 * If LRO timeout is not configured by application,
957 * use the minimal supported value.
959 if (!config.lro.timeout)
961 config.hca_attr.lro_timer_supported_periods[0];
962 DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
965 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
966 if (config.hca_attr.qos.sup && config.hca_attr.qos.srtcm_sup &&
969 config.hca_attr.qos.flow_meter_reg_c_ids;
971 * Meter needs two REG_C's for color match and pre-sfx
972 * flow match. Here get the REG_C for color match.
973 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
976 if (__builtin_popcount(reg_c_mask) < 1) {
978 DRV_LOG(WARNING, "No available register for"
981 priv->mtr_color_reg = ffs(reg_c_mask) - 1 +
984 priv->mtr_reg_share =
985 config.hca_attr.qos.flow_meter_reg_share;
986 DRV_LOG(DEBUG, "The REG_C meter uses is %d",
987 priv->mtr_color_reg);
993 DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
994 config.hca_attr.dev_freq_khz);
995 DRV_LOG(DEBUG, "Packet pacing is %ssupported",
996 config.hca_attr.qos.packet_pacing ? "" : "not ");
997 DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
998 config.hca_attr.cross_channel ? "" : "not ");
999 DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1000 config.hca_attr.wqe_index_ignore ? "" : "not ");
1001 DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1002 config.hca_attr.non_wire_sq ? "" : "not ");
1003 DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1004 config.hca_attr.log_max_static_sq_wq ? "" : "not ",
1005 config.hca_attr.log_max_static_sq_wq);
1006 DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1007 config.hca_attr.qos.wqe_rate_pp ? "" : "not ");
1009 DRV_LOG(ERR, "DevX is required for packet pacing");
1013 if (!config.hca_attr.qos.packet_pacing) {
1014 DRV_LOG(ERR, "Packet pacing is not supported");
1018 if (!config.hca_attr.cross_channel) {
1019 DRV_LOG(ERR, "Cross channel operations are"
1020 " required for packet pacing");
1024 if (!config.hca_attr.wqe_index_ignore) {
1025 DRV_LOG(ERR, "WQE index ignore feature is"
1026 " required for packet pacing");
1030 if (!config.hca_attr.non_wire_sq) {
1031 DRV_LOG(ERR, "Non-wire SQ feature is"
1032 " required for packet pacing");
1036 if (!config.hca_attr.log_max_static_sq_wq) {
1037 DRV_LOG(ERR, "Static WQE SQ feature is"
1038 " required for packet pacing");
1042 if (!config.hca_attr.qos.wqe_rate_pp) {
1043 DRV_LOG(ERR, "WQE rate mode is required"
1044 " for packet pacing");
1048 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1049 DRV_LOG(ERR, "DevX does not provide UAR offset,"
1050 " can't create queues for packet pacing");
1056 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
1058 err = mlx5_devx_cmd_register_read
1059 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
1060 reg, MLX5_ST_SZ_DW(register_mtutc));
1064 /* MTUTC register is read successfully. */
1065 ts_mode = MLX5_GET(register_mtutc, reg,
1067 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
1068 config.rt_timestamp = 1;
1070 /* Kernel does not support register reading. */
1071 if (config.hca_attr.dev_freq_khz ==
1072 (NS_PER_S / MS_PER_S))
1073 config.rt_timestamp = 1;
1077 * If the HW has a bug working with tunnel packet decapsulation and
1078 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
1079 * bit. Then the DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1081 if (config.hca_attr.scatter_fcs_w_decap_disable && config.decap_en)
1082 config.hw_fcs_strip = 0;
1083 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1084 (config.hw_fcs_strip ? "" : "not "));
1085 if (config.mprq.enabled && mprq) {
1086 if (config.mprq.stride_num_n &&
1087 (config.mprq.stride_num_n > mprq_max_stride_num_n ||
1088 config.mprq.stride_num_n < mprq_min_stride_num_n)) {
1089 config.mprq.stride_num_n =
1090 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1091 mprq_min_stride_num_n),
1092 mprq_max_stride_num_n);
1094 "the number of strides"
1095 " for Multi-Packet RQ is out of range,"
1096 " setting default value (%u)",
1097 1 << config.mprq.stride_num_n);
1099 if (config.mprq.stride_size_n &&
1100 (config.mprq.stride_size_n > mprq_max_stride_size_n ||
1101 config.mprq.stride_size_n < mprq_min_stride_size_n)) {
1102 config.mprq.stride_size_n =
1103 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
1104 mprq_min_stride_size_n),
1105 mprq_max_stride_size_n);
1107 "the size of a stride"
1108 " for Multi-Packet RQ is out of range,"
1109 " setting default value (%u)",
1110 1 << config.mprq.stride_size_n);
1112 config.mprq.min_stride_size_n = mprq_min_stride_size_n;
1113 config.mprq.max_stride_size_n = mprq_max_stride_size_n;
1114 } else if (config.mprq.enabled && !mprq) {
1115 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
1116 config.mprq.enabled = 0;
1118 if (config.max_dump_files_num == 0)
1119 config.max_dump_files_num = 128;
1120 eth_dev = rte_eth_dev_allocate(name);
1121 if (eth_dev == NULL) {
1122 DRV_LOG(ERR, "can not allocate rte ethdev");
1126 /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
1127 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1128 if (priv->representor) {
1129 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1130 eth_dev->data->representor_id = priv->representor_id;
1133 * Store the associated network device interface index. This index
1134 * is permanent throughout the lifetime of the device, so we may store
1135 * the ifindex here and use the cached value later.
1137 MLX5_ASSERT(spawn->ifindex);
1138 priv->if_index = spawn->ifindex;
1139 eth_dev->data->dev_private = priv;
1140 priv->dev_data = eth_dev->data;
1141 eth_dev->data->mac_addrs = priv->mac;
1142 eth_dev->device = dpdk_dev;
1143 /* Configure the first MAC address by default. */
1144 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1146 "port %u cannot get MAC address, is mlx5_en"
1147 " loaded? (errno: %s)",
1148 eth_dev->data->port_id, strerror(rte_errno));
1153 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1154 eth_dev->data->port_id,
1155 mac.addr_bytes[0], mac.addr_bytes[1],
1156 mac.addr_bytes[2], mac.addr_bytes[3],
1157 mac.addr_bytes[4], mac.addr_bytes[5]);
1158 #ifdef RTE_LIBRTE_MLX5_DEBUG
1160 char ifname[IF_NAMESIZE];
1162 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1163 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1164 eth_dev->data->port_id, ifname);
1166 DRV_LOG(DEBUG, "port %u ifname is unknown",
1167 eth_dev->data->port_id);
1170 /* Get actual MTU if possible. */
1171 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1176 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1178 /* Initialize burst functions to prevent crashes before link-up. */
1179 eth_dev->rx_pkt_burst = removed_rx_burst;
1180 eth_dev->tx_pkt_burst = removed_tx_burst;
1181 eth_dev->dev_ops = &mlx5_os_dev_ops;
1182 /* Register MAC address. */
1183 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1184 if (config.vf && config.vf_nl_en)
1185 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1186 mlx5_ifindex(eth_dev),
1187 eth_dev->data->mac_addrs,
1188 MLX5_MAX_MAC_ADDRESSES);
1190 priv->ctrl_flows = 0;
1191 TAILQ_INIT(&priv->flow_meters);
1192 TAILQ_INIT(&priv->flow_meter_profiles);
1193 /* Hint libmlx5 to use the PMD allocator for data plane resources. */
1194 mlx5_glue->dv_set_context_attr(sh->ctx,
1195 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1196 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
1197 .alloc = &mlx5_alloc_verbs_buf,
1198 .free = &mlx5_free_verbs_buf,
1201 /* Bring Ethernet device up. */
1202 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1203 eth_dev->data->port_id);
1204 mlx5_set_link_up(eth_dev);
1206 * Even though the interrupt handler is not installed yet,
1207 * interrupts will still trigger on the async_fd from
1208 * Verbs context returned by ibv_open_device().
1210 mlx5_link_update(eth_dev, 0);
1211 #ifdef HAVE_MLX5DV_DR_ESWITCH
1212 if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
1213 (switch_info->representor || switch_info->master)))
1214 config.dv_esw_en = 0;
1216 config.dv_esw_en = 0;
1218 /* Detect minimal data bytes to inline. */
1219 mlx5_set_min_inline(spawn, &config);
1220 /* Store device configuration on private structure. */
1221 priv->config = config;
1222 /* Create context for virtual machine VLAN workaround. */
1223 priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1224 if (config.dv_flow_en) {
1225 err = mlx5_alloc_shared_dr(priv);
1229 * RSS id is shared with meter flow id. Meter flow id can only
1230 * use the 24 MSB of the register.
1232 priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
1233 MLX5_MTR_COLOR_BITS);
1234 if (!priv->qrss_id_pool) {
1235 DRV_LOG(ERR, "can't create flow id pool");
1240 /* Supported Verbs flow priority number detection. */
1241 err = mlx5_flow_discover_priorities(eth_dev);
1246 priv->config.flow_prio = err;
1247 if (!priv->config.dv_esw_en &&
1248 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1249 DRV_LOG(WARNING, "metadata mode %u is not supported "
1250 "(no E-Switch)", priv->config.dv_xmeta_en);
1251 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1253 mlx5_set_metadata_mask(eth_dev);
1254 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1255 !priv->sh->dv_regc0_mask) {
1256 DRV_LOG(ERR, "metadata mode %u is not supported "
1257 "(no metadata reg_c[0] is available)",
1258 priv->config.dv_xmeta_en);
1263 * Allocate the buffer for flow creation, just once.
1264 * The allocation must be done before any flow is created.
1266 mlx5_flow_alloc_intermediate(eth_dev);
1267 /* Query availability of metadata reg_c's. */
1268 err = mlx5_flow_discover_mreg_c(eth_dev);
1273 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1275 "port %u extensive metadata register is not supported",
1276 eth_dev->data->port_id);
1277 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1278 DRV_LOG(ERR, "metadata mode %u is not supported "
1279 "(no metadata registers available)",
1280 priv->config.dv_xmeta_en);
1285 if (priv->config.dv_flow_en &&
1286 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1287 mlx5_flow_ext_mreg_supported(eth_dev) &&
1288 priv->sh->dv_regc0_mask) {
1289 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1290 MLX5_FLOW_MREG_HTABLE_SZ);
1291 if (!priv->mreg_cp_tbl) {
1299 if (priv->mreg_cp_tbl)
1300 mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
1302 mlx5_os_free_shared_dr(priv);
1303 if (priv->nl_socket_route >= 0)
1304 close(priv->nl_socket_route);
1305 if (priv->nl_socket_rdma >= 0)
1306 close(priv->nl_socket_rdma);
1307 if (priv->vmwa_context)
1308 mlx5_vlan_vmwa_exit(priv->vmwa_context);
1309 if (priv->qrss_id_pool)
1310 mlx5_flow_id_pool_release(priv->qrss_id_pool);
1312 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1314 if (eth_dev != NULL)
1315 eth_dev->data->dev_private = NULL;
1317 if (eth_dev != NULL) {
1318 /* mac_addrs must not be freed alone because it is part of dev_private. */
1321 eth_dev->data->mac_addrs = NULL;
1322 rte_eth_dev_release_port(eth_dev);
1325 mlx5_free_shared_dev_ctx(sh);
1326 MLX5_ASSERT(err > 0);
1332 * Comparison callback to sort device data.
1334 * This is meant to be used with qsort().
1337 * Pointer to pointer to first data object.
1339 * Pointer to pointer to second data object.
1342 * 0 if both objects are equal, less than 0 if the first argument is less
1343 * than the second, greater than 0 otherwise.
1346 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1348 const struct mlx5_switch_info *si_a =
1349 &((const struct mlx5_dev_spawn_data *)a)->info;
1350 const struct mlx5_switch_info *si_b =
1351 &((const struct mlx5_dev_spawn_data *)b)->info;
1354 /* Master device first. */
1355 ret = si_b->master - si_a->master;
1358 /* Then representor devices. */
1359 ret = si_b->representor - si_a->representor;
1362 /* Unidentified devices come last in no specific order. */
1363 if (!si_a->representor)
1365 /* Order representors by name. */
1366 return si_a->port_name - si_b->port_name;
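/*
 * Ordering example (illustrative): given spawn entries for one master and
 * representors with port names 2, 0 and 1, qsort() with this comparator
 * yields: master, representor 0, representor 1, representor 2. Entries that
 * are neither master nor representor are placed last in no specific order.
 */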
1370 * Match PCI information for possible slaves of bonding device.
1372 * @param[in] ibv_dev
1373 * Pointer to Infiniband device structure.
1374 * @param[in] pci_dev
1375 * Pointer to PCI device structure to match PCI address.
1376 * @param[in] nl_rdma
1377 * Netlink RDMA group socket handle.
1380 * negative value if no bonding device found, otherwise
1381 * positive index of slave PF in bonding.
1384 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1385 const struct rte_pci_device *pci_dev,
1388 char ifname[IF_NAMESIZE + 1];
1389 unsigned int ifindex;
1395 * Try to get master device name. If something goes
1396 * wrong suppose the lack of kernel support and no
1401 if (!strstr(ibv_dev->name, "bond"))
1403 np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
1407 * The master device might not be on the predefined
1408 * port (not on port index 1, it is not guaranteed);
1409 * we have to scan all Infiniband device ports and
1412 for (i = 1; i <= np; ++i) {
1413 /* Check whether Infiniband port is populated. */
1414 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
1417 if (!if_indextoname(ifindex, ifname))
1419 /* Try to read bonding slave names from sysfs. */
1421 "/sys/class/net/%s/master/bonding/slaves", ifname);
1422 file = fopen(slaves, "r");
1428 /* Use safe format to check maximal buffer length. */
1429 MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1430 while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1431 char tmp_str[IF_NAMESIZE + 32];
1432 struct rte_pci_addr pci_addr;
1433 struct mlx5_switch_info info;
1435 /* Process slave interface names in the loop. */
1436 snprintf(tmp_str, sizeof(tmp_str),
1437 "/sys/class/net/%s", ifname);
1438 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
1439 DRV_LOG(WARNING, "can not get PCI address"
1440 " for netdev \"%s\"", ifname);
1443 if (pci_dev->addr.domain != pci_addr.domain ||
1444 pci_dev->addr.bus != pci_addr.bus ||
1445 pci_dev->addr.devid != pci_addr.devid ||
1446 pci_dev->addr.function != pci_addr.function)
1448 /* Slave interface PCI address match found. */
1450 snprintf(tmp_str, sizeof(tmp_str),
1451 "/sys/class/net/%s/phys_port_name", ifname);
1452 file = fopen(tmp_str, "rb");
1455 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1456 if (fscanf(file, "%32s", tmp_str) == 1)
1457 mlx5_translate_port_name(tmp_str, &info);
1458 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
1459 info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1460 pf = info.port_name;
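/*
 * Illustrative note (hedged): the phys_port_name read above is translated by
 * mlx5_translate_port_name(); e.g. a slave netdev exposing "p0" (uplink
 * format) or the legacy "0" form is expected to yield pf = 0, which is then
 * returned as the slave PF index of the bonding device.
 */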
1469 * DPDK callback to register a PCI device.
1471 * This function spawns Ethernet devices out of a given PCI device.
1473 * @param[in] pci_drv
1474 * PCI driver structure (mlx5_driver).
1475 * @param[in] pci_dev
1476 * PCI device information.
1479 * 0 on success, a negative errno value otherwise and rte_errno is set.
1482 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1483 struct rte_pci_device *pci_dev)
1485 struct ibv_device **ibv_list;
1487 * Number of found IB devices matching the requested PCI BDF.
1488 * nd != 1 means there are multiple IB devices over the same
1489 * PCI device and we have representors and a master.
1491 unsigned int nd = 0;
1493 * Number of found IB device ports. nd = 1 and np = 1..n means
1494 * we have a single multiport IB device, and there may be
1495 * representors attached to some of the found ports.
1497 unsigned int np = 0;
1499 * Number of DPDK Ethernet devices to spawn - either over
1500 * multiple IB devices or multiple ports of a single IB device.
1501 * Actually this is the number of iterations to spawn.
1503 unsigned int ns = 0;
1506 * < 0 - no bonding device (single one)
1507 * >= 0 - bonding device (value is slave PF index)
1510 struct mlx5_dev_spawn_data *list = NULL;
1511 struct mlx5_dev_config dev_config;
1514 if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) {
1515 DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
1519 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1520 mlx5_pmd_socket_init();
1521 ret = mlx5_init_once();
1523 DRV_LOG(ERR, "unable to init PMD global data: %s",
1524 strerror(rte_errno));
1527 MLX5_ASSERT(pci_drv == &mlx5_driver);
1529 ibv_list = mlx5_glue->get_device_list(&ret);
1531 rte_errno = errno ? errno : ENOSYS;
1532 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1536 * First scan the list of all Infiniband devices to find
1537 * matching ones, gathering them into the list.
1539 struct ibv_device *ibv_match[ret + 1];
1540 int nl_route = mlx5_nl_init(NETLINK_ROUTE);
1541 int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
1545 struct rte_pci_addr pci_addr;
1547 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
1548 bd = mlx5_device_bond_pci_match
1549 (ibv_list[ret], pci_dev, nl_rdma);
1552 * Bonding device detected. Only one match is allowed;
1553 * bonding is supported over a multi-port IB device, and
1554 * there should be no matches on representor PCI
1555 * functions or non-VF LAG bonding devices with the
1556 * specified address.
1560 "multiple PCI match on bonding device"
1561 "\"%s\" found", ibv_list[ret]->name);
1566 DRV_LOG(INFO, "PCI information matches for"
1567 " slave %d bonding device \"%s\"",
1568 bd, ibv_list[ret]->name);
1569 ibv_match[nd++] = ibv_list[ret];
1572 if (mlx5_dev_to_pci_addr
1573 (ibv_list[ret]->ibdev_path, &pci_addr))
1575 if (pci_dev->addr.domain != pci_addr.domain ||
1576 pci_dev->addr.bus != pci_addr.bus ||
1577 pci_dev->addr.devid != pci_addr.devid ||
1578 pci_dev->addr.function != pci_addr.function)
1580 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
1581 ibv_list[ret]->name);
1582 ibv_match[nd++] = ibv_list[ret];
1584 ibv_match[nd] = NULL;
1586 /* No device matches, just complain and bail out. */
1588 "no Verbs device matches PCI device " PCI_PRI_FMT ","
1589 " are kernel drivers loaded?",
1590 pci_dev->addr.domain, pci_dev->addr.bus,
1591 pci_dev->addr.devid, pci_dev->addr.function);
1598 * The single matching device found may have multiple ports.
1599 * Each port may be a representor; we have to check the port
1600 * number and check for the existence of representors.
1603 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
1605 DRV_LOG(WARNING, "can not get IB device \"%s\""
1606 " ports number", ibv_match[0]->name);
1607 if (bd >= 0 && !np) {
1608 DRV_LOG(ERR, "can not get ports"
1609 " for bonding device");
1615 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
1618 * This may happen if there is VF LAG kernel support and
1619 * the application is compiled with an older rdma_core library.
1622 "No kernel/verbs support for VF LAG bonding found.");
1623 rte_errno = ENOTSUP;
1629 * Now we can determine the maximal
1630 * number of devices to be spawned.
1632 list = mlx5_malloc(MLX5_MEM_ZERO,
1633 sizeof(struct mlx5_dev_spawn_data) *
1635 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1637 DRV_LOG(ERR, "spawn data array allocation failure");
1642 if (bd >= 0 || np > 1) {
1644 * Single IB device with multiple ports found,
1645 * it may be E-Switch master device and representors.
1646 * We have to perform identification through the ports.
1648 MLX5_ASSERT(nl_rdma >= 0);
1649 MLX5_ASSERT(ns == 0);
1650 MLX5_ASSERT(nd == 1);
1652 for (i = 1; i <= np; ++i) {
1653 list[ns].max_port = np;
1654 list[ns].phys_port = i;
1655 list[ns].phys_dev = ibv_match[0];
1656 list[ns].eth_dev = NULL;
1657 list[ns].pci_dev = pci_dev;
1658 list[ns].pf_bond = bd;
1659 list[ns].ifindex = mlx5_nl_ifindex
1661 mlx5_os_get_dev_device_name
1662 (list[ns].phys_dev), i);
1663 if (!list[ns].ifindex) {
1665 * No network interface index found for the
1666 * specified port, which means there is no
1667 * representor on this port. It's OK,
1668 * there can be disabled ports, for example
1669 * if sriov_numvfs < sriov_totalvfs.
1675 ret = mlx5_nl_switch_info
1679 if (ret || (!list[ns].info.representor &&
1680 !list[ns].info.master)) {
1682 * We failed to recognize representors with
1683 * Netlink, let's try to perform the task
1686 ret = mlx5_sysfs_switch_info
1690 if (!ret && bd >= 0) {
1691 switch (list[ns].info.name_type) {
1692 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
1693 if (list[ns].info.port_name == bd)
1696 case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
1698 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
1699 if (list[ns].info.pf_num == bd)
1707 if (!ret && (list[ns].info.representor ^
1708 list[ns].info.master))
1713 "unable to recognize master/representors"
1714 " on the IB device with multiple ports");
1721 * The existence of several matching entries (nd > 1) means
1722 * port representors have been instantiated. No existing Verbs
1723 * call nor sysfs entries can tell them apart, this can only
1724 * be done through Netlink calls assuming kernel drivers are
1725 * recent enough to support them.
1727 * In the event of identification failure through Netlink,
1728 * try again through sysfs, then:
1730 * 1. A single IB device matches (nd == 1) with single
1731 * port (np=0/1) and is not a representor, assume
1732 * no switch support.
1734 * 2. Otherwise no safe assumptions can be made;
1735 * complain louder and bail out.
1737 for (i = 0; i != nd; ++i) {
1738 memset(&list[ns].info, 0, sizeof(list[ns].info));
1739 list[ns].max_port = 1;
1740 list[ns].phys_port = 1;
1741 list[ns].phys_dev = ibv_match[i];
1742 list[ns].eth_dev = NULL;
1743 list[ns].pci_dev = pci_dev;
1744 list[ns].pf_bond = -1;
1745 list[ns].ifindex = 0;
1747 list[ns].ifindex = mlx5_nl_ifindex
1749 mlx5_os_get_dev_device_name
1750 (list[ns].phys_dev), 1);
1751 if (!list[ns].ifindex) {
1752 char ifname[IF_NAMESIZE];
1755 * Netlink failed, which may happen with an old
1756 * ib_core kernel driver (before 4.16).
1757 * We can assume an old driver because
1758 * here we are processing single-port IB
1759 * devices. Let's try sysfs to retrieve
1760 * the ifindex. This method works for the
1761 * master device only.
1765 * Multiple devices found; assume
1766 * representors, as we cannot distinguish
1767 * master/representor nor retrieve the
1768 * ifindex via sysfs.
1772 ret = mlx5_get_ifname_sysfs
1773 (ibv_match[i]->ibdev_path, ifname);
1776 if_nametoindex(ifname);
1777 if (!list[ns].ifindex) {
1779 * No network interface index found
1780 * for the specified device, which means
1781 * it is neither a representor
1789 ret = mlx5_nl_switch_info
1793 if (ret || (!list[ns].info.representor &&
1794 !list[ns].info.master)) {
1796 * We failed to recognize representors with
1797 * Netlink, let's try to perform the task
1800 ret = mlx5_sysfs_switch_info
1804 if (!ret && (list[ns].info.representor ^
1805 list[ns].info.master)) {
1807 } else if ((nd == 1) &&
1808 !list[ns].info.representor &&
1809 !list[ns].info.master) {
1811 * A single IB device with
1812 * one physical port and an
1813 * attached network device.
1814 * Maybe SR-IOV is not enabled
1815 * or there are no representors.
1817 DRV_LOG(INFO, "no E-Switch support detected");
1824 "unable to recognize master/representors"
1825 " on the multiple IB devices");
1833 * Sort the list to probe devices in natural order for the user's convenience
1834 * (i.e. master first, then representors from lowest to highest ID).
1836 qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
1837 /* Default configuration. */
1838 dev_config = (struct mlx5_dev_config){
1840 .mps = MLX5_ARG_UNSET,
1841 .dbnc = MLX5_ARG_UNSET,
1843 .txq_inline_max = MLX5_ARG_UNSET,
1844 .txq_inline_min = MLX5_ARG_UNSET,
1845 .txq_inline_mpw = MLX5_ARG_UNSET,
1846 .txqs_inline = MLX5_ARG_UNSET,
1848 .mr_ext_memseg_en = 1,
1850 .enabled = 0, /* Disabled by default. */
1853 .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
1854 .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
1859 .log_hp_size = MLX5_ARG_UNSET,
1861 /* Device specific configuration. */
1862 switch (pci_dev->id.device_id) {
1863 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1864 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1865 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1866 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1867 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
1868 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
1869 case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
1875 for (i = 0; i != ns; ++i) {
1878 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
1881 if (!list[i].eth_dev) {
1882 if (rte_errno != EBUSY && rte_errno != EEXIST)
1884 /* Device is disabled or already spawned. Ignore it. */
1887 restore = list[i].eth_dev->data->dev_flags;
1888 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
1889 /* Restore non-PCI flags cleared by the above call. */
1890 list[i].eth_dev->data->dev_flags |= restore;
1891 rte_eth_dev_probing_finish(list[i].eth_dev);
1895 "probe of PCI device " PCI_PRI_FMT " aborted after"
1896 " encountering an error: %s",
1897 pci_dev->addr.domain, pci_dev->addr.bus,
1898 pci_dev->addr.devid, pci_dev->addr.function,
1899 strerror(rte_errno));
1903 if (!list[i].eth_dev)
1905 mlx5_dev_close(list[i].eth_dev);
1906 /* mac_addrs must not be freed because it is in dev_private. */
1907 list[i].eth_dev->data->mac_addrs = NULL;
1908 claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
1910 /* Restore original error. */
1917 * Do the routine cleanup:
1918 * - close opened Netlink sockets
1919 * - free allocated spawn data array
1920 * - free the Infiniband device list
1928 MLX5_ASSERT(ibv_list);
1929 mlx5_glue->free_device_list(ibv_list);
1934 mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
1939 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1940 /* Get environment variable to store. */
1941 env = getenv(MLX5_SHUT_UP_BF);
1942 value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
1943 if (config->dbnc == MLX5_ARG_UNSET)
1944 setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
1946 setenv(MLX5_SHUT_UP_BF,
1947 config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
1952 mlx5_restore_doorbell_mapping_env(int value)
1954 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1955 /* Restore the original environment variable state. */
1956 if (value == MLX5_ARG_UNSET)
1957 unsetenv(MLX5_SHUT_UP_BF);
1959 setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
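/*
 * Hedged sketch of the intended call sequence (see mlx5_os_open_device()
 * below): the environment is configured right before opening the device so
 * that rdma-core samples the doorbell mapping setting, then restored:
 *
 *	dbmap_env = mlx5_config_doorbell_mapping_env(config);
 *	sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
 *	...
 *	mlx5_restore_doorbell_mapping_env(dbmap_env);
 */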
1963 * Extract pdn of PD object using DV API.
1966 * Pointer to the verbs PD object.
1968 * Pointer to the PD object number variable.
1971 * 0 on success, error value otherwise.
1974 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
1976 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1977 struct mlx5dv_obj obj;
1978 struct mlx5dv_pd pd_info;
1982 obj.pd.out = &pd_info;
1983 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
1985 DRV_LOG(DEBUG, "Fail to get PD object info");
1994 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
1998 * Function API to open IB device.
2000 * This function calls the Linux glue APIs to open a device.
2003 * Pointer to the IB device attributes (name, port, etc).
2004 * @param[out] config
2005 * Pointer to device configuration structure.
2007 * Pointer to shared context structure.
2010 * 0 on success, a positive error value otherwise.
2013 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
2014 const struct mlx5_dev_config *config,
2015 struct mlx5_dev_ctx_shared *sh)
2020 sh->numa_node = spawn->pci_dev->device.numa_node;
2021 pthread_mutex_init(&sh->txpp.mutex, NULL);
2023 * Configure the environment variable "MLX5_SHUT_UP_BF"
2024 * before the device creation. The rdma_core library
2025 * checks the variable at device creation and
2026 * stores the result internally.
2028 dbmap_env = mlx5_config_doorbell_mapping_env(config);
2029 /* Try to open IB device with DV first, then usual Verbs. */
2031 sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
2034 DRV_LOG(DEBUG, "DevX is supported");
2035 /* The device is created, no need for environment. */
2036 mlx5_restore_doorbell_mapping_env(dbmap_env);
2038 /* The environment variable is still configured. */
2039 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
2040 err = errno ? errno : ENODEV;
2042 * The environment variable is not needed anymore,
2043 * all device creation attempts are completed.
2045 mlx5_restore_doorbell_mapping_env(dbmap_env);
2048 DRV_LOG(DEBUG, "DevX is NOT supported");
2055 * Install shared asynchronous device events handler.
2056 * This function is implemented to support event sharing
2057 * between multiple ports of a single IB device.
2060 * Pointer to mlx5_dev_ctx_shared object.
2063 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
2068 sh->intr_handle.fd = -1;
2069 flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
2070 ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
2071 F_SETFL, flags | O_NONBLOCK);
2073 DRV_LOG(INFO, "failed to change file descriptor async event"
2076 sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
2077 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
2078 if (rte_intr_callback_register(&sh->intr_handle,
2079 mlx5_dev_interrupt_handler, sh)) {
2080 DRV_LOG(INFO, "Fail to install the shared interrupt.");
2081 sh->intr_handle.fd = -1;
2085 #ifdef HAVE_IBV_DEVX_ASYNC
2086 sh->intr_handle_devx.fd = -1;
2088 (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
2089 struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
2091 DRV_LOG(INFO, "failed to allocate devx_comp.");
2094 flags = fcntl(devx_comp->fd, F_GETFL);
2095 ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
2097 DRV_LOG(INFO, "failed to change file descriptor"
2101 sh->intr_handle_devx.fd = devx_comp->fd;
2102 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
2103 if (rte_intr_callback_register(&sh->intr_handle_devx,
2104 mlx5_dev_interrupt_handler_devx, sh)) {
2105 DRV_LOG(INFO, "Fail to install the devx shared"
2107 sh->intr_handle_devx.fd = -1;
2109 #endif /* HAVE_IBV_DEVX_ASYNC */
2114 * Uninstall shared asynchronous device events handler.
2115 * This function is implemented to support event sharing
2116 * between multiple ports of a single IB device.
2119 * Pointer to mlx5_dev_ctx_shared object.
2122 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
2124 if (sh->intr_handle.fd >= 0)
2125 mlx5_intr_callback_unregister(&sh->intr_handle,
2126 mlx5_dev_interrupt_handler, sh);
2127 #ifdef HAVE_IBV_DEVX_ASYNC
2128 if (sh->intr_handle_devx.fd >= 0)
2129 rte_intr_callback_unregister(&sh->intr_handle_devx,
2130 mlx5_dev_interrupt_handler_devx, sh);
2132 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
2137 * Read statistics by a named counter.
2140 * Pointer to the private device data structure.
2141 * @param[in] ctr_name
2142 * Pointer to the name of the statistic counter to read
2144 * Pointer to read statistic value.
2146 * 0 on success and stat is valid, 1 if failed to read the value
2151 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2157 MKSTR(path, "%s/ports/%d/hw_counters/%s",
2158 priv->sh->ibdev_path,
2161 fd = open(path, O_RDONLY);
2163 char buf[21] = {'\0'};
2164 ssize_t n = read(fd, buf, sizeof(buf));
2168 *stat = strtoull(buf, NULL, 10);
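/*
 * Illustrative example (hedged): for a counter named "out_of_buffer" the
 * MKSTR() above resolves to a sysfs path like
 * "/sys/class/infiniband/mlx5_0/ports/1/hw_counters/out_of_buffer", whose
 * decimal content is parsed with strtoull(). The exact counter set depends
 * on the kernel driver.
 */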
2178 * Set the reg_mr and dereg_mr callbacks.
2180 * @param reg_mr_cb[out]
2181 * Pointer to the reg_mr function.
2182 * @param dereg_mr_cb[out]
2183 * Pointer to the dereg_mr function.
2187 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
2188 mlx5_dereg_mr_t *dereg_mr_cb)
2190 *reg_mr_cb = mlx5_verbs_ops.reg_mr;
2191 *dereg_mr_cb = mlx5_verbs_ops.dereg_mr;
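/*
 * Hedged usage sketch: callers obtain the OS-specific MR callbacks through
 * this setter, e.g.:
 *
 *	mlx5_reg_mr_t reg_mr;
 *	mlx5_dereg_mr_t dereg_mr;
 *
 *	mlx5_os_set_reg_mr_cb(&reg_mr, &dereg_mr);
 */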
2195 * Remove a MAC address from the device.
2198 * Pointer to Ethernet device structure.
2200 * MAC address index.
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int vf = priv->config.vf;

	if (vf)
		mlx5_nl_mac_addr_remove(priv->nl_socket_route,
					mlx5_ifindex(dev), priv->mac_own,
					&dev->data->mac_addrs[index], index);
}

/**
 * Adds a MAC address to the device
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int vf = priv->config.vf;
	int ret = 0;

	if (vf)
		ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
					   mlx5_ifindex(dev), priv->mac_own,
					   mac, index);
	return ret;
}

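/*
 * Note: the netlink MAC table updates above are only issued for VF
 * devices, where the kernel netdevice must also be kept in sync;
 * priv->mac_own tracks which entries were added by the PMD.
 */
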
/**
 * Modify a VF MAC address
 *
 * @param priv
 *   Pointer to device private data.
 * @param mac_addr
 *   MAC address to modify into.
 * @param iface_idx
 *   Net device interface index
 * @param vf_index
 *   VF index
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	return mlx5_nl_vf_mac_addr_modify
		(priv->nl_socket_route, iface_idx, mac_addr, vf_index);
}

/**
 * Set device promiscuous mode
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_nl_promisc(priv->nl_socket_route,
			       mlx5_ifindex(dev), !!enable);
}

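/*
 * Promiscuous and all-multicast settings are applied to the associated
 * kernel netdevice through netlink; the device-level Rx flow rules are
 * managed separately by the generic rxmode handlers.
 */
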
/**
 * Set device allmulticast mode
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_nl_allmulti(priv->nl_socket_route,
				mlx5_ifindex(dev), !!enable);
}

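/* Available operations in the default mode (primary process, flow isolation disabled). */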
const struct eth_dev_ops mlx5_os_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_txpp_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
};

/* Available operations from secondary process. */
const struct eth_dev_ops mlx5_os_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_txpp_read_clock,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_txpp_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
};