1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
13 #include <linux/rtnetlink.h>
14 #include <linux/sockios.h>
15 #include <linux/ethtool.h>
18 #include <rte_malloc.h>
19 #include <ethdev_driver.h>
20 #include <ethdev_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_bus_auxiliary.h>
24 #include <rte_common.h>
25 #include <rte_kvargs.h>
26 #include <rte_rwlock.h>
27 #include <rte_spinlock.h>
28 #include <rte_string_fns.h>
29 #include <rte_alarm.h>
30 #include <rte_eal_paging.h>
32 #include <mlx5_glue.h>
33 #include <mlx5_devx_cmds.h>
34 #include <mlx5_common.h>
35 #include <mlx5_common_mp.h>
36 #include <mlx5_common_mr.h>
37 #include <mlx5_malloc.h>
39 #include "mlx5_defs.h"
41 #include "mlx5_common_os.h"
42 #include "mlx5_utils.h"
43 #include "mlx5_rxtx.h"
46 #include "mlx5_autoconf.h"
47 #include "mlx5_flow.h"
48 #include "rte_pmd_mlx5.h"
49 #include "mlx5_verbs.h"
51 #include "mlx5_devx.h"
53 #ifndef HAVE_IBV_MLX5_MOD_MPW
54 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
55 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
58 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
59 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
62 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
64 /* Spinlock for mlx5_shared_data allocation. */
65 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
67 /* Process local data for secondary processes. */
68 static struct mlx5_local_data mlx5_local_data;
70 /* rte flow indexed pool configuration. */
71 static struct mlx5_indexed_pool_config icfg[] = {
73 .size = sizeof(struct rte_flow),
77 .malloc = mlx5_malloc,
80 .type = "ctl_flow_ipool",
83 .size = sizeof(struct rte_flow),
89 .malloc = mlx5_malloc,
91 .per_core_cache = 1 << 14,
92 .type = "rte_flow_ipool",
95 .size = sizeof(struct rte_flow),
101 .malloc = mlx5_malloc,
104 .type = "mcp_flow_ipool",
109 * Set the completion channel file descriptor interrupt as non-blocking.
112 * Pointer to RQ channel object, which includes the channel fd
115 * The file descriptor (representing the interrupt) used in this channel.
118 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
121 mlx5_os_set_nonblock_channel_fd(int fd)
125 flags = fcntl(fd, F_GETFL);
126 return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
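/*
 * Illustrative usage sketch (an assumption, not code taken from this file):
 * the Rx interrupt setup would typically apply the helper above to the fd of
 * a Verbs completion channel right after creating it, e.g.:
 *
 *	struct ibv_comp_channel *channel =
 *		mlx5_glue->create_comp_channel(ctx);
 *
 *	if (channel == NULL || mlx5_os_set_nonblock_channel_fd(channel->fd))
 *		goto error;
 */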
130 * Get mlx5 device attributes. The glue function query_device_ex() is called
131 * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5 device
132 * attributes are then filled in from that glue out parameter.
135 * Pointer to mlx5 device.
138 * Pointer to mlx5 device attributes.
141 * 0 on success, a non-zero error number otherwise.
144 mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
145 struct mlx5_dev_attr *device_attr)
148 struct ibv_context *ctx = cdev->ctx;
149 struct ibv_device_attr_ex attr_ex;
151 memset(device_attr, 0, sizeof(*device_attr));
152 err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
155 device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
156 device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
157 device_attr->max_sge = attr_ex.orig_attr.max_sge;
158 device_attr->max_cq = attr_ex.orig_attr.max_cq;
159 device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
160 device_attr->max_mr = attr_ex.orig_attr.max_mr;
161 device_attr->max_pd = attr_ex.orig_attr.max_pd;
162 device_attr->max_qp = attr_ex.orig_attr.max_qp;
163 device_attr->max_srq = attr_ex.orig_attr.max_srq;
164 device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
165 device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
166 device_attr->max_rwq_indirection_table_size =
167 attr_ex.rss_caps.max_rwq_indirection_table_size;
168 device_attr->max_tso = attr_ex.tso_caps.max_tso;
169 device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
171 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
172 err = mlx5_glue->dv_query_device(ctx, &dv_attr);
176 device_attr->flags = dv_attr.flags;
177 device_attr->comp_mask = dv_attr.comp_mask;
178 #ifdef HAVE_IBV_MLX5_MOD_SWP
179 device_attr->sw_parsing_offloads =
180 dv_attr.sw_parsing_caps.sw_parsing_offloads;
182 device_attr->min_single_stride_log_num_of_bytes =
183 dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
184 device_attr->max_single_stride_log_num_of_bytes =
185 dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
186 device_attr->min_single_wqe_log_num_of_strides =
187 dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
188 device_attr->max_single_wqe_log_num_of_strides =
189 dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
190 device_attr->stride_supported_qpts =
191 dv_attr.striding_rq_caps.supported_qpts;
192 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
193 device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
195 strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
196 sizeof(device_attr->fw_ver));
202 * Detect whether misc5 matching is supported.
205 * Device private data pointer
207 #ifdef HAVE_MLX5DV_DR
209 __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
211 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
212 /* Dummy VxLAN matcher to detect rdma-core misc5 cap
213 * Case: IPv4--->UDP--->VxLAN--->vni
216 struct mlx5_flow_dv_match_params matcher_mask;
221 uint32_t *tunnel_header_m;
222 struct mlx5dv_flow_matcher_attr dv_attr;
224 memset(&matcher_mask, 0, sizeof(matcher_mask));
225 matcher_mask.size = sizeof(matcher_mask.buf);
226 match_m = matcher_mask.buf;
227 headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
228 misc5_m = MLX5_ADDR_OF(fte_match_param,
229 match_m, misc_parameters_5);
230 tunnel_header_m = (uint32_t *)
231 MLX5_ADDR_OF(fte_match_set_misc5,
232 misc5_m, tunnel_header_1);
233 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
234 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 4);
235 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
236 *tunnel_header_m = 0xffffff;
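	/*
	 * The actual capability probe: create a flow table and a matcher
	 * that requires the misc5 criteria built above; if rdma-core
	 * accepts the matcher, misc5 matching is supported and
	 * sh->misc5_cap is set below.
	 */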
238 tbl = mlx5_glue->dr_create_flow_tbl(priv->sh->rx_domain, 1);
240 DRV_LOG(INFO, "No SW steering support");
243 dv_attr.type = IBV_FLOW_ATTR_NORMAL,
244 dv_attr.match_mask = (void *)&matcher_mask,
245 dv_attr.match_criteria_enable =
246 (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
247 (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT);
248 dv_attr.priority = 3;
249 #ifdef HAVE_MLX5DV_DR_ESWITCH
251 if (priv->config.dv_esw_en) {
252 /* FDB enabled reg_c_0 */
253 dv_attr.match_criteria_enable |=
254 (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
255 misc2_m = MLX5_ADDR_OF(fte_match_param,
256 match_m, misc_parameters_2);
257 MLX5_SET(fte_match_set_misc2, misc2_m,
258 metadata_reg_c_0, 0xffff);
261 matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->cdev->ctx,
264 priv->sh->misc5_cap = 1;
265 mlx5_glue->dv_destroy_flow_matcher(matcher);
267 mlx5_glue->dr_destroy_flow_tbl(tbl);
275 * Initialize DR related data within private structure.
276 * Routine checks the reference counter and does actual
277 * resources creation/initialization only if counter is zero.
280 * Pointer to the private device data structure.
283 * Zero on success, positive error code otherwise.
286 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
288 struct mlx5_dev_ctx_shared *sh = priv->sh;
289 char s[MLX5_NAME_SIZE] __rte_unused;
292 MLX5_ASSERT(sh && sh->refcnt);
295 err = mlx5_alloc_table_hash_list(priv);
298 /* The resources below are only valid with DV support. */
299 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
300 /* Init port id action list. */
301 snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
302 sh->port_id_action_list = mlx5_list_create(s, sh, true,
303 flow_dv_port_id_create_cb,
304 flow_dv_port_id_match_cb,
305 flow_dv_port_id_remove_cb,
306 flow_dv_port_id_clone_cb,
307 flow_dv_port_id_clone_free_cb);
308 if (!sh->port_id_action_list)
310 /* Init push vlan action list. */
311 snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
312 sh->push_vlan_action_list = mlx5_list_create(s, sh, true,
313 flow_dv_push_vlan_create_cb,
314 flow_dv_push_vlan_match_cb,
315 flow_dv_push_vlan_remove_cb,
316 flow_dv_push_vlan_clone_cb,
317 flow_dv_push_vlan_clone_free_cb);
318 if (!sh->push_vlan_action_list)
320 /* Init sample action list. */
321 snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
322 sh->sample_action_list = mlx5_list_create(s, sh, true,
323 flow_dv_sample_create_cb,
324 flow_dv_sample_match_cb,
325 flow_dv_sample_remove_cb,
326 flow_dv_sample_clone_cb,
327 flow_dv_sample_clone_free_cb);
328 if (!sh->sample_action_list)
330 /* Init dest array action list. */
331 snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
332 sh->dest_array_list = mlx5_list_create(s, sh, true,
333 flow_dv_dest_array_create_cb,
334 flow_dv_dest_array_match_cb,
335 flow_dv_dest_array_remove_cb,
336 flow_dv_dest_array_clone_cb,
337 flow_dv_dest_array_clone_free_cb);
338 if (!sh->dest_array_list)
340 /* Init shared flex parsers list, no need for lcore_share. */
341 snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
342 sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
343 mlx5_flex_parser_create_cb,
344 mlx5_flex_parser_match_cb,
345 mlx5_flex_parser_remove_cb,
346 mlx5_flex_parser_clone_cb,
347 mlx5_flex_parser_clone_free_cb);
348 if (!sh->flex_parsers_dv)
351 #ifdef HAVE_MLX5DV_DR
354 /* Reference counter is zero, we should initialize structures. */
355 domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
356 MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
358 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
362 sh->rx_domain = domain;
363 domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
364 MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
366 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
370 sh->tx_domain = domain;
371 #ifdef HAVE_MLX5DV_DR_ESWITCH
372 if (priv->config.dv_esw_en) {
373 domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
374 MLX5DV_DR_DOMAIN_TYPE_FDB);
376 DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
380 sh->fdb_domain = domain;
383 * The drop action is just a dummy placeholder in rdma-core. It does
384 * not belong to any domain, has no attributes, and can be shared by
385 * the entire device.
387 sh->dr_drop_action = mlx5_glue->dr_create_flow_action_drop();
388 if (!sh->dr_drop_action) {
389 DRV_LOG(ERR, "mlx5dv_dr_create_flow_action_drop failed");
394 if (!sh->tunnel_hub && priv->config.dv_miss_info)
395 err = mlx5_alloc_tunnel_hub(sh);
397 DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
400 if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
401 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
402 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
404 mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
406 sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
407 if (!priv->config.allow_duplicate_pattern) {
408 #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
409 DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
411 mlx5_glue->dr_allow_duplicate_rules(sh->rx_domain, 0);
412 mlx5_glue->dr_allow_duplicate_rules(sh->tx_domain, 0);
414 mlx5_glue->dr_allow_duplicate_rules(sh->fdb_domain, 0);
417 __mlx5_discovery_misc5_cap(priv);
418 #endif /* HAVE_MLX5DV_DR */
419 sh->default_miss_action =
420 mlx5_glue->dr_create_flow_action_default_miss();
421 if (!sh->default_miss_action)
422 DRV_LOG(WARNING, "Default miss action is not supported.");
423 LIST_INIT(&sh->shared_rxqs);
426 /* Rollback the created objects. */
428 mlx5_glue->dr_destroy_domain(sh->rx_domain);
429 sh->rx_domain = NULL;
432 mlx5_glue->dr_destroy_domain(sh->tx_domain);
433 sh->tx_domain = NULL;
435 if (sh->fdb_domain) {
436 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
437 sh->fdb_domain = NULL;
439 if (sh->dr_drop_action) {
440 mlx5_glue->destroy_flow_action(sh->dr_drop_action);
441 sh->dr_drop_action = NULL;
443 if (sh->pop_vlan_action) {
444 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
445 sh->pop_vlan_action = NULL;
447 if (sh->encaps_decaps) {
448 mlx5_hlist_destroy(sh->encaps_decaps);
449 sh->encaps_decaps = NULL;
451 if (sh->modify_cmds) {
452 mlx5_hlist_destroy(sh->modify_cmds);
453 sh->modify_cmds = NULL;
456 /* Tags should have been destroyed together with the flows beforehand. */
457 mlx5_hlist_destroy(sh->tag_table);
458 sh->tag_table = NULL;
460 if (sh->tunnel_hub) {
461 mlx5_release_tunnel_hub(sh, priv->dev_port);
462 sh->tunnel_hub = NULL;
464 mlx5_free_table_hash_list(priv);
465 if (sh->port_id_action_list) {
466 mlx5_list_destroy(sh->port_id_action_list);
467 sh->port_id_action_list = NULL;
469 if (sh->push_vlan_action_list) {
470 mlx5_list_destroy(sh->push_vlan_action_list);
471 sh->push_vlan_action_list = NULL;
473 if (sh->sample_action_list) {
474 mlx5_list_destroy(sh->sample_action_list);
475 sh->sample_action_list = NULL;
477 if (sh->dest_array_list) {
478 mlx5_list_destroy(sh->dest_array_list);
479 sh->dest_array_list = NULL;
485 * Destroy DR related data within private structure.
488 * Pointer to the private device data structure.
491 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
493 struct mlx5_dev_ctx_shared *sh = priv->sh;
495 MLX5_ASSERT(sh && sh->refcnt);
498 MLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));
499 #ifdef HAVE_MLX5DV_DR
501 mlx5_glue->dr_destroy_domain(sh->rx_domain);
502 sh->rx_domain = NULL;
505 mlx5_glue->dr_destroy_domain(sh->tx_domain);
506 sh->tx_domain = NULL;
508 #ifdef HAVE_MLX5DV_DR_ESWITCH
509 if (sh->fdb_domain) {
510 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
511 sh->fdb_domain = NULL;
513 if (sh->dr_drop_action) {
514 mlx5_glue->destroy_flow_action(sh->dr_drop_action);
515 sh->dr_drop_action = NULL;
518 if (sh->pop_vlan_action) {
519 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
520 sh->pop_vlan_action = NULL;
522 #endif /* HAVE_MLX5DV_DR */
523 if (sh->default_miss_action)
524 mlx5_glue->destroy_flow_action
525 (sh->default_miss_action);
526 if (sh->encaps_decaps) {
527 mlx5_hlist_destroy(sh->encaps_decaps);
528 sh->encaps_decaps = NULL;
530 if (sh->modify_cmds) {
531 mlx5_hlist_destroy(sh->modify_cmds);
532 sh->modify_cmds = NULL;
535 /* Tags should have been destroyed together with the flows beforehand. */
536 mlx5_hlist_destroy(sh->tag_table);
537 sh->tag_table = NULL;
539 if (sh->tunnel_hub) {
540 mlx5_release_tunnel_hub(sh, priv->dev_port);
541 sh->tunnel_hub = NULL;
543 mlx5_free_table_hash_list(priv);
544 if (sh->port_id_action_list) {
545 mlx5_list_destroy(sh->port_id_action_list);
546 sh->port_id_action_list = NULL;
548 if (sh->push_vlan_action_list) {
549 mlx5_list_destroy(sh->push_vlan_action_list);
550 sh->push_vlan_action_list = NULL;
552 if (sh->sample_action_list) {
553 mlx5_list_destroy(sh->sample_action_list);
554 sh->sample_action_list = NULL;
556 if (sh->dest_array_list) {
557 mlx5_list_destroy(sh->dest_array_list);
558 sh->dest_array_list = NULL;
563 * Initialize shared data between the primary and secondary processes.
565 * A memzone is reserved by the primary process and secondary processes attach to it.
569 * 0 on success, a negative errno value otherwise and rte_errno is set.
572 mlx5_init_shared_data(void)
574 const struct rte_memzone *mz;
577 rte_spinlock_lock(&mlx5_shared_data_lock);
578 if (mlx5_shared_data == NULL) {
579 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
580 /* Allocate shared memory. */
581 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
582 sizeof(*mlx5_shared_data),
586 "Cannot allocate mlx5 shared data");
590 mlx5_shared_data = mz->addr;
591 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
592 rte_spinlock_init(&mlx5_shared_data->lock);
594 /* Lookup allocated shared memory. */
595 mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
598 "Cannot attach mlx5 shared data");
602 mlx5_shared_data = mz->addr;
603 memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
607 rte_spinlock_unlock(&mlx5_shared_data_lock);
612 * PMD global initialization.
614 * Independent of any individual device, this function initializes global
615 * per-PMD data structures, distinguishing primary and secondary processes.
616 * Hence, the initialization is performed once per process.
619 * 0 on success, a negative errno value otherwise and rte_errno is set.
624 struct mlx5_shared_data *sd;
625 struct mlx5_local_data *ld = &mlx5_local_data;
628 if (mlx5_init_shared_data())
630 sd = mlx5_shared_data;
632 rte_spinlock_lock(&sd->lock);
633 switch (rte_eal_process_type()) {
634 case RTE_PROC_PRIMARY:
637 ret = mlx5_mp_init_primary(MLX5_MP_NAME,
638 mlx5_mp_os_primary_handle);
641 sd->init_done = true;
643 case RTE_PROC_SECONDARY:
646 ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
647 mlx5_mp_os_secondary_handle);
651 ld->init_done = true;
657 rte_spinlock_unlock(&sd->lock);
662 * Detect and configure the DV flow counter mode.
665 * Pointer to rte_eth_dev structure.
669 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
671 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
672 struct mlx5_priv *priv = dev->data->dev_private;
673 struct mlx5_dev_ctx_shared *sh = priv->sh;
676 #ifndef HAVE_IBV_DEVX_ASYNC
680 if (!sh->devx || !priv->config.dv_flow_en ||
681 !priv->config.hca_attr.flow_counters_dump ||
682 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
683 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
687 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
688 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
689 priv->config.hca_attr.flow_counters_dump,
690 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
691 /* Initialize fallback mode only on the port that initializes the sh. */
693 sh->cmng.counter_fallback = fallback;
694 else if (fallback != sh->cmng.counter_fallback)
695 DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
696 "with others:%d.", PORT_ID(priv), fallback);
701 * Detect DR flow drop action support.
704 * Pointer to rte_eth_dev structure.
708 mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
710 #ifdef HAVE_MLX5DV_DR
711 struct mlx5_priv *priv = dev->data->dev_private;
713 if (!priv->config.dv_flow_en || !priv->sh->dr_drop_action)
716 * Use the DR drop action placeholder when the device supports it;
717 * otherwise, use the queue drop action.
719 if (!priv->sh->drop_action_check_flag) {
720 if (!mlx5_flow_discover_dr_action_support(dev))
721 priv->sh->dr_drop_action_en = 1;
722 priv->sh->drop_action_check_flag = 1;
724 if (priv->sh->dr_drop_action_en)
725 priv->root_drop_action = priv->sh->dr_drop_action;
727 priv->root_drop_action = priv->drop_queue.hrxq->action;
732 mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
734 struct mlx5_priv *priv = dev->data->dev_private;
735 void *ctx = priv->sh->cdev->ctx;
737 priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
738 if (!priv->q_counters) {
739 struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
742 DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
743 "by DevX - fall-back to use the kernel driver global "
744 "queue counter.", dev->data->port_id);
745 /* Create WQ by kernel and query its queue counter ID. */
747 wq = mlx5_glue->create_wq(ctx,
748 &(struct ibv_wq_init_attr){
749 .wq_type = IBV_WQT_RQ,
752 .pd = priv->sh->cdev->pd,
756 /* A counter is assigned only when the WQ is in RDY state. */
757 int ret = mlx5_glue->modify_wq(wq,
758 &(struct ibv_wq_attr){
759 .attr_mask = IBV_WQ_ATTR_STATE,
760 .wq_state = IBV_WQS_RDY,
764 mlx5_devx_cmd_wq_query(wq,
765 &priv->counter_set_id);
766 claim_zero(mlx5_glue->destroy_wq(wq));
768 claim_zero(mlx5_glue->destroy_cq(cq));
771 priv->counter_set_id = priv->q_counters->id;
773 if (priv->counter_set_id == 0)
774 DRV_LOG(INFO, "Part of the port %d statistics will not be "
775 "available.", dev->data->port_id);
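/*
 * Sketch of the intended use (an assumption, not code from this excerpt):
 * the counter object/ID cached above is what the statistics path would later
 * query through DevX, e.g.:
 *
 *	uint32_t out_of_buffer = 0;
 *
 *	mlx5_devx_cmd_queue_counter_query(priv->q_counters, 0, &out_of_buffer);
 */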
779 * Check if the representor spawn info matches the devargs.
782 * Verbs device parameters (name, port, switch_info) to spawn.
784 * Device devargs to probe.
790 mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
791 struct rte_eth_devargs *eth_da)
793 struct mlx5_switch_info *switch_info = &spawn->info;
796 uint16_t repr_id = mlx5_representor_id_encode(switch_info,
799 switch (eth_da->type) {
800 case RTE_ETH_REPRESENTOR_SF:
801 if (!(spawn->info.port_name == -1 &&
802 switch_info->name_type ==
803 MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
804 switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF) {
809 case RTE_ETH_REPRESENTOR_VF:
810 /* Allow HPF representor index -1 as an exception. */
811 if (!(spawn->info.port_name == -1 &&
812 switch_info->name_type ==
813 MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
814 switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF) {
819 case RTE_ETH_REPRESENTOR_NONE:
824 DRV_LOG(ERR, "unsupported representor type");
827 /* Check representor ID: */
828 for (p = 0; p < eth_da->nb_ports; ++p) {
829 if (spawn->pf_bond < 0) {
830 /* For non-LAG mode, allow and ignore pf. */
831 switch_info->pf_num = eth_da->ports[p];
832 repr_id = mlx5_representor_id_encode(switch_info,
835 for (f = 0; f < eth_da->nb_representor_ports; ++f) {
836 id = MLX5_REPRESENTOR_ID
837 (eth_da->ports[p], eth_da->type,
838 eth_da->representor_ports[f]);
848 * Spawn an Ethernet device from Verbs information.
851 * Backing DPDK device.
853 * Verbs device parameters (name, port, switch_info) to spawn.
855 * Device configuration parameters.
860 * A valid Ethernet device object on success, NULL otherwise and rte_errno
861 * is set. The following errors are defined:
863 * EBUSY: device is not supposed to be spawned.
864 * EEXIST: device is already spawned.
866 static struct rte_eth_dev *
867 mlx5_dev_spawn(struct rte_device *dpdk_dev,
868 struct mlx5_dev_spawn_data *spawn,
869 struct mlx5_dev_config *config,
870 struct rte_eth_devargs *eth_da)
872 const struct mlx5_switch_info *switch_info = &spawn->info;
873 struct mlx5_dev_ctx_shared *sh = NULL;
874 struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
875 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
876 struct rte_eth_dev *eth_dev = NULL;
877 struct mlx5_priv *priv = NULL;
879 unsigned int hw_padding = 0;
881 unsigned int mpls_en = 0;
882 unsigned int swp = 0;
883 unsigned int mprq = 0;
884 struct rte_ether_addr mac;
885 char name[RTE_ETH_NAME_MAX_LEN];
886 int own_domain_id = 0;
888 struct mlx5_port_info vport_info = { .query_flags = 0 };
892 /* Determine if this port representor is supposed to be spawned. */
893 if (switch_info->representor && dpdk_dev->devargs &&
894 !mlx5_representor_match(spawn, eth_da))
896 /* Build device name. */
897 if (spawn->pf_bond < 0) {
899 if (!switch_info->representor)
900 strlcpy(name, dpdk_dev->name, sizeof(name));
902 err = snprintf(name, sizeof(name), "%s_representor_%s%u",
904 switch_info->name_type ==
905 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
906 switch_info->port_name);
908 /* Bonding device. */
909 if (!switch_info->representor) {
910 err = snprintf(name, sizeof(name), "%s_%s",
911 dpdk_dev->name, spawn->phys_dev_name);
913 err = snprintf(name, sizeof(name), "%s_%s_representor_c%dpf%d%s%u",
914 dpdk_dev->name, spawn->phys_dev_name,
915 switch_info->ctrl_num,
917 switch_info->name_type ==
918 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
919 switch_info->port_name);
922 if (err >= (int)sizeof(name))
923 DRV_LOG(WARNING, "device name overflow %s", name);
924 /* Check if the device is already spawned. */
925 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
929 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
930 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
931 struct mlx5_mp_id mp_id;
933 eth_dev = rte_eth_dev_attach_secondary(name);
934 if (eth_dev == NULL) {
935 DRV_LOG(ERR, "can not attach rte ethdev");
939 eth_dev->device = dpdk_dev;
940 eth_dev->dev_ops = &mlx5_dev_sec_ops;
941 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
942 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
943 err = mlx5_proc_priv_init(eth_dev);
946 mlx5_mp_id_init(&mp_id, eth_dev->data->port_id);
947 /* Receive command fd from primary process */
948 err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
951 /* Remap UAR for Tx queues. */
952 err = mlx5_tx_uar_init_secondary(eth_dev, err);
956 * Ethdev pointer is still required as input since
957 * the primary device is not accessible from the secondary process.
960 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
961 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
964 mlx5_dev_close(eth_dev);
968 * Some parameters ("tx_db_nc" in particular) are needed in
969 * advance to create the dv/verbs device context. We process the
970 * devargs here to get them, and process the devargs again later
971 * to override some hardware settings.
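 *
 * An illustrative example (assumption, not taken from this file): with EAL
 * arguments such as "-a 0000:03:00.0,tx_db_nc=1,dv_flow_en=1", this first
 * pass already honors "tx_db_nc", while most other keys only take effect
 * in the second pass further below.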
973 err = mlx5_args(config, dpdk_dev->devargs);
976 DRV_LOG(ERR, "failed to process device arguments: %s",
977 strerror(rte_errno));
980 if (config->dv_miss_info) {
981 if (switch_info->master || switch_info->representor)
982 config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
984 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
987 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
988 config->dest_tir = 1;
990 #ifdef HAVE_IBV_MLX5_MOD_SWP
991 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
994 * Multi-packet send is supported by ConnectX-4 Lx PF as well
995 * as all ConnectX-5 devices.
997 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
998 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
1000 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1001 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
1003 mlx5_glue->dv_query_device(sh->cdev->ctx, &dv_attr);
1004 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
1005 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
1006 DRV_LOG(DEBUG, "enhanced MPW is supported");
1007 mps = MLX5_MPW_ENHANCED;
1009 DRV_LOG(DEBUG, "MPW is supported");
1013 DRV_LOG(DEBUG, "MPW isn't supported");
1014 mps = MLX5_MPW_DISABLED;
1016 #ifdef HAVE_IBV_MLX5_MOD_SWP
1017 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
1018 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
1019 DRV_LOG(DEBUG, "SWP support: %u", swp);
1021 config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
1022 MLX5_SW_PARSING_TSO_CAP);
1023 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1024 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
1025 struct mlx5dv_striding_rq_caps mprq_caps =
1026 dv_attr.striding_rq_caps;
1028 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
1029 mprq_caps.min_single_stride_log_num_of_bytes);
1030 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
1031 mprq_caps.max_single_stride_log_num_of_bytes);
1032 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
1033 mprq_caps.min_single_wqe_log_num_of_strides);
1034 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
1035 mprq_caps.max_single_wqe_log_num_of_strides);
1036 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
1037 mprq_caps.supported_qpts);
1038 DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
1039 config->mprq.log_min_stride_wqe_size);
1040 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
1042 config->mprq.log_min_stride_size =
1043 mprq_caps.min_single_stride_log_num_of_bytes;
1044 config->mprq.log_max_stride_size =
1045 mprq_caps.max_single_stride_log_num_of_bytes;
1046 config->mprq.log_min_stride_num =
1047 mprq_caps.min_single_wqe_log_num_of_strides;
1048 config->mprq.log_max_stride_num =
1049 mprq_caps.max_single_wqe_log_num_of_strides;
1052 /* Rx CQE compression is enabled by default. */
1053 config->cqe_comp = 1;
1054 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1055 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
1056 config->tunnel_en = dv_attr.tunnel_offloads_caps &
1057 (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
1058 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
1059 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
1061 if (config->tunnel_en) {
1062 DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
1064 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ? "[VXLAN]" : "",
1066 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ? "[GRE]" : "",
1068 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ? "[GENEVE]" : ""
1071 DRV_LOG(DEBUG, "tunnel offloading is not supported");
1075 "tunnel offloading disabled due to old OFED/rdma-core version");
1077 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1078 mpls_en = ((dv_attr.tunnel_offloads_caps &
1079 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
1080 (dv_attr.tunnel_offloads_caps &
1081 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
1082 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
1083 mpls_en ? "" : "not ");
1085 DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
1086 " old OFED/rdma-core version or firmware configuration");
1088 config->mpls_en = mpls_en;
1089 nl_rdma = mlx5_nl_init(NETLINK_RDMA);
1090 /* Check port status. */
1091 if (spawn->phys_port <= UINT8_MAX) {
1092 /* The legacy Verbs API only supports 8-bit port numbers. */
1093 err = mlx5_glue->query_port(sh->cdev->ctx, spawn->phys_port,
1096 DRV_LOG(ERR, "port query failed: %s", strerror(err));
1099 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1100 DRV_LOG(ERR, "port is not configured in Ethernet mode");
1104 } else if (nl_rdma >= 0) {
1105 /* IB doesn't allow more than 255 ports, must be Ethernet. */
1106 err = mlx5_nl_port_state(nl_rdma,
1107 spawn->phys_dev_name,
1110 DRV_LOG(INFO, "Failed to get netlink port state: %s",
1111 strerror(rte_errno));
1115 port_attr.state = (enum ibv_port_state)err;
1117 if (port_attr.state != IBV_PORT_ACTIVE)
1118 DRV_LOG(INFO, "port is not active: \"%s\" (%d)",
1119 mlx5_glue->port_state_str(port_attr.state),
1121 /* Allocate private eth device data. */
1122 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1124 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1126 DRV_LOG(ERR, "priv allocation failure");
1131 priv->dev_port = spawn->phys_port;
1132 priv->pci_dev = spawn->pci_dev;
1133 priv->mtu = RTE_ETHER_MTU;
1134 /* Some internal functions rely on Netlink sockets, open them now. */
1135 priv->nl_socket_rdma = nl_rdma;
1136 priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
1137 priv->representor = !!switch_info->representor;
1138 priv->master = !!switch_info->master;
1139 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1140 priv->vport_meta_tag = 0;
1141 priv->vport_meta_mask = 0;
1142 priv->pf_bond = spawn->pf_bond;
1145 "dev_port=%u bus=%s pci=%s master=%d representor=%d pf_bond=%d\n",
1146 priv->dev_port, dpdk_dev->bus->name,
1147 priv->pci_dev ? priv->pci_dev->name : "NONE",
1148 priv->master, priv->representor, priv->pf_bond);
1151 * If we have an E-Switch, we should determine the vport attributes.
1152 * The E-Switch may use either the source vport field or the reg_c[0]
1153 * metadata register to match on the vport index. The engaged part of
1154 * the metadata register is defined by the mask.
1156 if (switch_info->representor || switch_info->master) {
1157 err = mlx5_glue->devx_port_query(sh->cdev->ctx,
1162 "Cannot query devx port %d on device %s",
1163 spawn->phys_port, spawn->phys_dev_name);
1164 vport_info.query_flags = 0;
1167 if (vport_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1168 priv->vport_meta_tag = vport_info.vport_meta_tag;
1169 priv->vport_meta_mask = vport_info.vport_meta_mask;
1170 if (!priv->vport_meta_mask) {
1172 "vport zero mask for port %d on bonding device %s",
1173 spawn->phys_port, spawn->phys_dev_name);
1177 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1179 "Invalid vport tag for port %d on bonding device %s",
1180 spawn->phys_port, spawn->phys_dev_name);
1185 if (vport_info.query_flags & MLX5_PORT_QUERY_VPORT) {
1186 priv->vport_id = vport_info.vport_id;
1187 } else if (spawn->pf_bond >= 0 &&
1188 (switch_info->representor || switch_info->master)) {
1190 "Cannot deduce vport index for port %d on bonding device %s",
1191 spawn->phys_port, spawn->phys_dev_name);
1196 * Derive the vport index in a compatible way. Kernel/rdma_core
1197 * supports single E-Switch per PF configurations only and the
1198 * vport_id field contains the vport index for the associated VF,
1199 * which is deduced from the representor port name.
1200 * For example, take IB device port 10 with the attached network
1201 * device eth0 whose port name attribute is pf0vf2: we deduce the
1202 * VF number as 2 and set the vport index to 3 (2 + 1). This
1203 * assignment scheme should be changed if multiple E-Switch
1204 * instances per PF configurations and/or PCI
1205 * subfunctions are added.
1207 priv->vport_id = switch_info->representor ?
1208 switch_info->port_name + 1 : -1;
1210 priv->representor_id = mlx5_representor_id_encode(switch_info,
1213 * Look for sibling devices in order to reuse their switch domain
1214 * if any, otherwise allocate one.
1216 MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
1217 const struct mlx5_priv *opriv =
1218 rte_eth_devices[port_id].data->dev_private;
1221 opriv->sh != priv->sh ||
1223 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1225 priv->domain_id = opriv->domain_id;
1226 DRV_LOG(DEBUG, "dev_port-%u inherit domain_id=%u\n",
1227 priv->dev_port, priv->domain_id);
1230 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1231 err = rte_eth_switch_domain_alloc(&priv->domain_id);
1234 DRV_LOG(ERR, "unable to allocate switch domain: %s",
1235 strerror(rte_errno));
1239 DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
1240 priv->dev_port, priv->domain_id);
1242 /* Override some values set by hardware configuration. */
1243 mlx5_args(config, dpdk_dev->devargs);
1244 err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
1247 config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
1248 IBV_DEVICE_RAW_IP_CSUM);
1249 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
1250 (config->hw_csum ? "" : "not "));
1251 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
1252 !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1253 DRV_LOG(DEBUG, "counters are not supported");
1255 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
1256 if (config->dv_flow_en) {
1257 DRV_LOG(WARNING, "DV flow is not supported");
1258 config->dv_flow_en = 0;
1261 config->ind_table_max_size =
1262 sh->device_attr.max_rwq_indirection_table_size;
1264 * Remove this check once DPDK supports larger/variable
1265 * indirection tables.
1267 if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
1268 config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
1269 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
1270 config->ind_table_max_size);
1271 config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
1272 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
1273 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
1274 (config->hw_vlan_strip ? "" : "not "));
1275 config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
1276 IBV_RAW_PACKET_CAP_SCATTER_FCS);
1277 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1278 hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
1279 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1280 hw_padding = !!(sh->device_attr.device_cap_flags_ex &
1281 IBV_DEVICE_PCI_WRITE_END_PADDING);
1283 if (config->hw_padding && !hw_padding) {
1284 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
1285 config->hw_padding = 0;
1286 } else if (config->hw_padding) {
1287 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
1289 config->tso = (sh->device_attr.max_tso > 0 &&
1290 (sh->device_attr.tso_supported_qpts &
1291 (1 << IBV_QPT_RAW_PACKET)));
1293 config->tso_max_payload_sz = sh->device_attr.max_tso;
1295 * MPW is disabled by default, while the Enhanced MPW is enabled
1298 if (config->mps == MLX5_ARG_UNSET)
1299 config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
1302 config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
1303 DRV_LOG(INFO, "%sMPS is %s",
1304 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
1305 config->mps == MLX5_MPW ? "legacy " : "",
1306 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
1308 config->hca_attr = sh->cdev->config.hca_attr;
1309 sh->steering_format_version =
1310 config->hca_attr.steering_format_version;
1311 /* Check for LRO support. */
1312 if (config->dest_tir && config->hca_attr.lro_cap &&
1313 config->dv_flow_en) {
1314 /* TBD check tunnel lro caps. */
1315 config->lro.supported = config->hca_attr.lro_cap;
1316 DRV_LOG(DEBUG, "Device supports LRO");
1318 * If LRO timeout is not configured by application,
1319 * use the minimal supported value.
1321 if (!config->lro.timeout)
1322 config->lro.timeout =
1323 config->hca_attr.lro_timer_supported_periods[0];
1324 DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1325 config->lro.timeout);
1326 DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
1327 "required for coalescing is %d bytes",
1328 config->hca_attr.lro_min_mss_size);
1330 #if defined(HAVE_MLX5DV_DR) && \
1331 (defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
1332 defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
1333 if (config->hca_attr.qos.sup &&
1334 config->hca_attr.qos.flow_meter_old &&
1335 config->dv_flow_en) {
1336 uint8_t reg_c_mask =
1337 config->hca_attr.qos.flow_meter_reg_c_ids;
1339 * The meter needs two REG_Cs: one for color match and one for pre-suffix
1340 * flow match. Get the REG_C for color match here.
1341 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
1344 if (__builtin_popcount(reg_c_mask) < 1) {
1346 DRV_LOG(WARNING, "No available register for"
1350 * The meter color register is used by the
1351 * flow-hit feature as well.
1352 * The flow-hit feature must use REG_C_3;
1353 * prefer REG_C_3 if it is available.
1355 if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
1356 priv->mtr_color_reg = REG_C_3;
1358 priv->mtr_color_reg = ffs(reg_c_mask)
1361 priv->mtr_reg_share =
1362 config->hca_attr.qos.flow_meter;
1363 DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
1364 priv->mtr_color_reg);
1367 if (config->hca_attr.qos.sup &&
1368 config->hca_attr.qos.flow_meter_aso_sup) {
1369 uint32_t log_obj_size =
1370 rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
1372 config->hca_attr.qos.log_meter_aso_granularity &&
1374 config->hca_attr.qos.log_meter_aso_max_alloc)
1375 sh->meter_aso_en = 1;
1378 err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
1384 if (config->hca_attr.flow.tunnel_header_0_1)
1385 sh->tunnel_header_0_1 = 1;
1387 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
1388 if (config->hca_attr.flow_hit_aso &&
1389 priv->mtr_color_reg == REG_C_3) {
1390 sh->flow_hit_aso_en = 1;
1391 err = mlx5_flow_aso_age_mng_init(sh);
1396 DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
1398 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1399 #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
1400 defined(HAVE_MLX5_DR_ACTION_ASO_CT)
1401 if (config->hca_attr.ct_offload &&
1402 priv->mtr_color_reg == REG_C_3) {
1403 err = mlx5_flow_aso_ct_mng_init(sh);
1408 DRV_LOG(DEBUG, "CT ASO is supported.");
1411 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
1412 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1413 if (config->hca_attr.log_max_ft_sampler_num > 0 &&
1414 config->dv_flow_en) {
1415 priv->sampler_en = 1;
1416 DRV_LOG(DEBUG, "Sampler enabled!");
1418 priv->sampler_en = 0;
1419 if (!config->hca_attr.log_max_ft_sampler_num)
1421 "No available register for sampler.");
1423 DRV_LOG(DEBUG, "DV flow is not supported!");
1427 if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
1428 !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
1429 DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
1430 config->cqe_comp = 0;
1432 if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
1433 (!sh->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
1434 DRV_LOG(WARNING, "Flow Tag CQE compression"
1435 " format isn't supported.");
1436 config->cqe_comp = 0;
1438 if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
1439 (!sh->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
1440 DRV_LOG(WARNING, "L3/L4 Header CQE compression"
1441 " format isn't supported.");
1442 config->cqe_comp = 0;
1444 DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
1445 config->cqe_comp ? "" : "not ");
1446 if (config->tx_pp) {
1447 DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
1448 config->hca_attr.dev_freq_khz);
1449 DRV_LOG(DEBUG, "Packet pacing is %ssupported",
1450 config->hca_attr.qos.packet_pacing ? "" : "not ");
1451 DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
1452 config->hca_attr.cross_channel ? "" : "not ");
1453 DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1454 config->hca_attr.wqe_index_ignore ? "" : "not ");
1455 DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1456 config->hca_attr.non_wire_sq ? "" : "not ");
1457 DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1458 config->hca_attr.log_max_static_sq_wq ? "" : "not ",
1459 config->hca_attr.log_max_static_sq_wq);
1460 DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1461 config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
1463 DRV_LOG(ERR, "DevX is required for packet pacing");
1467 if (!config->hca_attr.qos.packet_pacing) {
1468 DRV_LOG(ERR, "Packet pacing is not supported");
1472 if (!config->hca_attr.cross_channel) {
1473 DRV_LOG(ERR, "Cross channel operations are"
1474 " required for packet pacing");
1478 if (!config->hca_attr.wqe_index_ignore) {
1479 DRV_LOG(ERR, "WQE index ignore feature is"
1480 " required for packet pacing");
1484 if (!config->hca_attr.non_wire_sq) {
1485 DRV_LOG(ERR, "Non-wire SQ feature is"
1486 " required for packet pacing");
1490 if (!config->hca_attr.log_max_static_sq_wq) {
1491 DRV_LOG(ERR, "Static WQE SQ feature is"
1492 " required for packet pacing");
1496 if (!config->hca_attr.qos.wqe_rate_pp) {
1497 DRV_LOG(ERR, "WQE rate mode is required"
1498 " for packet pacing");
1502 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1503 DRV_LOG(ERR, "DevX does not provide UAR offset,"
1504 " can't create queues for packet pacing");
1509 if (config->std_delay_drop || config->hp_delay_drop) {
1510 if (!config->hca_attr.rq_delay_drop) {
1511 config->std_delay_drop = 0;
1512 config->hp_delay_drop = 0;
1514 "dev_port-%u: Rxq delay drop is not supported",
1519 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
1521 err = config->hca_attr.access_register_user ?
1522 mlx5_devx_cmd_register_read
1523 (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
1524 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
1528 /* MTUTC register is read successfully. */
1529 ts_mode = MLX5_GET(register_mtutc, reg,
1531 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
1532 config->rt_timestamp = 1;
1534 /* Kernel does not support register reading. */
1535 if (config->hca_attr.dev_freq_khz ==
1536 (NS_PER_S / MS_PER_S))
1537 config->rt_timestamp = 1;
1541 * If the HW has a bug with tunnel packet decapsulation combined with
1542 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
1543 * bit. Then the RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1545 if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
1546 config->hw_fcs_strip = 0;
1547 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1548 (config->hw_fcs_strip ? "" : "not "));
1549 if (config->mprq.enabled && !mprq) {
1550 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
1551 config->mprq.enabled = 0;
1553 if (config->max_dump_files_num == 0)
1554 config->max_dump_files_num = 128;
1555 eth_dev = rte_eth_dev_allocate(name);
1556 if (eth_dev == NULL) {
1557 DRV_LOG(ERR, "can not allocate rte ethdev");
1561 if (priv->representor) {
1562 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1563 eth_dev->data->representor_id = priv->representor_id;
1564 MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
1565 struct mlx5_priv *opriv =
1566 rte_eth_devices[port_id].data->dev_private;
1569 opriv->domain_id == priv->domain_id &&
1570 opriv->sh == priv->sh) {
1571 eth_dev->data->backer_port_id = port_id;
1575 if (port_id >= RTE_MAX_ETHPORTS)
1576 eth_dev->data->backer_port_id = eth_dev->data->port_id;
1578 priv->mp_id.port_id = eth_dev->data->port_id;
1579 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
1581 * Store the associated network device interface index. This index
1582 * is permanent throughout the device lifetime, so we may store
1583 * the ifindex here and use the cached value later.
1585 MLX5_ASSERT(spawn->ifindex);
1586 priv->if_index = spawn->ifindex;
1587 priv->lag_affinity_idx = sh->refcnt - 1;
1588 eth_dev->data->dev_private = priv;
1589 priv->dev_data = eth_dev->data;
1590 eth_dev->data->mac_addrs = priv->mac;
1591 eth_dev->device = dpdk_dev;
1592 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1593 /* Configure the first MAC address by default. */
1594 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1596 "port %u cannot get MAC address, is mlx5_en"
1597 " loaded? (errno: %s)",
1598 eth_dev->data->port_id, strerror(rte_errno));
1603 "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
1604 eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
1605 #ifdef RTE_LIBRTE_MLX5_DEBUG
1607 char ifname[MLX5_NAMESIZE];
1609 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1610 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1611 eth_dev->data->port_id, ifname);
1613 DRV_LOG(DEBUG, "port %u ifname is unknown",
1614 eth_dev->data->port_id);
1617 /* Get actual MTU if possible. */
1618 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1623 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1625 /* Initialize burst functions to prevent crashes before link-up. */
1626 eth_dev->rx_pkt_burst = removed_rx_burst;
1627 eth_dev->tx_pkt_burst = removed_tx_burst;
1628 eth_dev->dev_ops = &mlx5_dev_ops;
1629 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1630 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1631 eth_dev->rx_queue_count = mlx5_rx_queue_count;
1632 /* Register MAC address. */
1633 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1634 if (config->vf && config->vf_nl_en)
1635 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1636 mlx5_ifindex(eth_dev),
1637 eth_dev->data->mac_addrs,
1638 MLX5_MAX_MAC_ADDRESSES);
1639 priv->ctrl_flows = 0;
1640 rte_spinlock_init(&priv->flow_list_lock);
1641 TAILQ_INIT(&priv->flow_meters);
1642 priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
1643 if (!priv->mtr_profile_tbl)
1645 /* Bring Ethernet device up. */
1646 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1647 eth_dev->data->port_id);
1648 mlx5_set_link_up(eth_dev);
1650 * Even though the interrupt handler is not installed yet,
1651 * interrupts will still trigger on the async_fd from
1652 * Verbs context returned by ibv_open_device().
1654 mlx5_link_update(eth_dev, 0);
1655 #ifdef HAVE_MLX5DV_DR_ESWITCH
1656 if (!(config->hca_attr.eswitch_manager && config->dv_flow_en &&
1657 (switch_info->representor || switch_info->master)))
1658 config->dv_esw_en = 0;
1660 config->dv_esw_en = 0;
1662 /* Detect minimal data bytes to inline. */
1663 mlx5_set_min_inline(spawn, config);
1664 /* Store device configuration on private structure. */
1665 priv->config = *config;
1666 for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
1667 icfg[i].release_mem_en = !!config->reclaim_mode;
1668 if (config->reclaim_mode)
1669 icfg[i].per_core_cache = 0;
1670 priv->flows[i] = mlx5_ipool_create(&icfg[i]);
1671 if (!priv->flows[i])
1674 /* Create context for virtual machine VLAN workaround. */
1675 priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1676 if (config->dv_flow_en) {
1677 err = mlx5_alloc_shared_dr(priv);
1680 if (mlx5_flex_item_port_init(eth_dev) < 0)
1683 if (sh->devx && config->dv_flow_en && config->dest_tir) {
1684 priv->obj_ops = devx_obj_ops;
1685 mlx5_queue_counter_id_prepare(eth_dev);
1686 priv->obj_ops.lb_dummy_queue_create =
1687 mlx5_rxq_ibv_obj_dummy_lb_create;
1688 priv->obj_ops.lb_dummy_queue_release =
1689 mlx5_rxq_ibv_obj_dummy_lb_release;
1690 } else if (spawn->max_port > UINT8_MAX) {
1691 /* Verbs can't support port numbers larger than 255 by design. */
1692 DRV_LOG(ERR, "must enable DV and ESW when RDMA link ports > 255");
1696 priv->obj_ops = ibv_obj_ops;
1698 if (config->tx_pp &&
1699 priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new) {
1701 * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
1702 * packet pacing and already checked above.
1703 * Hence, we should only make sure the SQs will be created
1704 * with DevX, not with Verbs.
1705 * Verbs allocates the SQ UAR on its own and it can't be shared
1706 * with Clock Queue UAR as required for Tx scheduling.
1708 DRV_LOG(ERR, "Verbs SQs, UAR can't be shared as required for packet pacing");
1712 priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
1713 if (!priv->drop_queue.hrxq)
1715 /* A port representor shares the same max priority with the PF port. */
1716 if (!priv->sh->flow_priority_check_flag) {
1717 /* Detect the number of supported Verbs flow priorities. */
1718 err = mlx5_flow_discover_priorities(eth_dev);
1719 priv->sh->flow_max_priority = err;
1720 priv->sh->flow_priority_check_flag = 1;
1722 err = priv->sh->flow_max_priority;
1728 if (!priv->config.dv_esw_en &&
1729 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1730 DRV_LOG(WARNING, "metadata mode %u is not supported "
1731 "(no E-Switch)", priv->config.dv_xmeta_en);
1732 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1734 mlx5_set_metadata_mask(eth_dev);
1735 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1736 !priv->sh->dv_regc0_mask) {
1737 DRV_LOG(ERR, "metadata mode %u is not supported "
1738 "(no metadata reg_c[0] is available)",
1739 priv->config.dv_xmeta_en);
1743 priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
1744 mlx5_hrxq_create_cb,
1746 mlx5_hrxq_remove_cb,
1748 mlx5_hrxq_clone_free_cb);
1751 rte_rwlock_init(&priv->ind_tbls_lock);
1752 /* Query availability of metadata reg_c's. */
1753 if (!priv->sh->metadata_regc_check_flag) {
1754 err = mlx5_flow_discover_mreg_c(eth_dev);
1760 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1762 "port %u extensive metadata register is not supported",
1763 eth_dev->data->port_id);
1764 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1765 DRV_LOG(ERR, "metadata mode %u is not supported "
1766 "(no metadata registers available)",
1767 priv->config.dv_xmeta_en);
1772 if (priv->config.dv_flow_en &&
1773 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1774 mlx5_flow_ext_mreg_supported(eth_dev) &&
1775 priv->sh->dv_regc0_mask) {
1776 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1777 MLX5_FLOW_MREG_HTABLE_SZ,
1778 false, true, eth_dev,
1779 flow_dv_mreg_create_cb,
1780 flow_dv_mreg_match_cb,
1781 flow_dv_mreg_remove_cb,
1782 flow_dv_mreg_clone_cb,
1783 flow_dv_mreg_clone_free_cb);
1784 if (!priv->mreg_cp_tbl) {
1789 rte_spinlock_init(&priv->shared_act_sl);
1790 mlx5_flow_counter_mode_config(eth_dev);
1791 mlx5_flow_drop_action_config(eth_dev);
1792 if (priv->config.dv_flow_en)
1793 eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1797 if (priv->mreg_cp_tbl)
1798 mlx5_hlist_destroy(priv->mreg_cp_tbl);
1800 mlx5_os_free_shared_dr(priv);
1801 if (priv->nl_socket_route >= 0)
1802 close(priv->nl_socket_route);
1803 if (priv->vmwa_context)
1804 mlx5_vlan_vmwa_exit(priv->vmwa_context);
1805 if (eth_dev && priv->drop_queue.hrxq)
1806 mlx5_drop_action_destroy(eth_dev);
1807 if (priv->mtr_profile_tbl)
1808 mlx5_l3t_destroy(priv->mtr_profile_tbl);
1810 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1812 mlx5_list_destroy(priv->hrxqs);
1813 if (eth_dev && priv->flex_item_map)
1814 mlx5_flex_item_port_cleanup(eth_dev);
1816 if (eth_dev != NULL)
1817 eth_dev->data->dev_private = NULL;
1819 if (eth_dev != NULL) {
1820 /* mac_addrs must not be freed alone because it is part of dev_private. */
1823 eth_dev->data->mac_addrs = NULL;
1824 rte_eth_dev_release_port(eth_dev);
1827 mlx5_free_shared_dev_ctx(sh);
1830 MLX5_ASSERT(err > 0);
1836 * Comparison callback to sort device data.
1838 * This is meant to be used with qsort().
1841 * Pointer to pointer to first data object.
1843 * Pointer to pointer to second data object.
1846 * 0 if both objects are equal, less than 0 if the first argument is less
1847 * than the second, greater than 0 otherwise.
1850 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1852 const struct mlx5_switch_info *si_a =
1853 &((const struct mlx5_dev_spawn_data *)a)->info;
1854 const struct mlx5_switch_info *si_b =
1855 &((const struct mlx5_dev_spawn_data *)b)->info;
1858 /* Master device first. */
1859 ret = si_b->master - si_a->master;
1862 /* Then representor devices. */
1863 ret = si_b->representor - si_a->representor;
1866 /* Unidentified devices come last in no specific order. */
1867 if (!si_a->representor)
1869 /* Order representors by name. */
1870 return si_a->port_name - si_b->port_name;
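/*
 * Illustrative sketch (an assumption, not code from this excerpt): the spawn
 * list is expected to be ordered with this comparator before spawning, e.g.:
 *
 *	qsort(list, ns, sizeof(struct mlx5_dev_spawn_data),
 *	      mlx5_dev_spawn_data_cmp);
 */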
1874 * Match PCI information for possible slaves of bonding device.
1876 * @param[in] ibdev_name
1877 * Name of Infiniband device.
1878 * @param[in] pci_dev
1879 * Pointer to primary PCI address structure to match.
1880 * @param[in] nl_rdma
1881 * Netlink RDMA group socket handle.
1883 * Representor owner PF index.
1884 * @param[out] bond_info
1885 * Pointer to bonding information.
1888 * negative value if no bonding device found, otherwise
1889 * positive index of slave PF in bonding.
1892 mlx5_device_bond_pci_match(const char *ibdev_name,
1893 const struct rte_pci_addr *pci_dev,
1894 int nl_rdma, uint16_t owner,
1895 struct mlx5_bond_info *bond_info)
1897 char ifname[IF_NAMESIZE + 1];
1898 unsigned int ifindex;
1900 FILE *bond_file = NULL, *file;
1903 uint8_t cur_guid[32] = {0};
1904 uint8_t guid[32] = {0};
1907 * Try to get the master device name. If something goes wrong, assume
1908 * there is no kernel support and no bonding devices.
1910 memset(bond_info, 0, sizeof(*bond_info));
1913 if (!strstr(ibdev_name, "bond"))
1915 np = mlx5_nl_portnum(nl_rdma, ibdev_name);
1918 if (mlx5_get_device_guid(pci_dev, cur_guid, sizeof(cur_guid)) < 0)
1921 * The master device might not be on the predefined port (port
1922 * index 1 is not guaranteed), so we have to scan all Infiniband
1923 * device ports and find the master.
1925 for (i = 1; i <= np; ++i) {
1926 /* Check whether Infiniband port is populated. */
1927 ifindex = mlx5_nl_ifindex(nl_rdma, ibdev_name, i);
1930 if (!if_indextoname(ifindex, ifname))
1932 /* Try to read bonding slave names from sysfs. */
1934 "/sys/class/net/%s/master/bonding/slaves", ifname);
1935 bond_file = fopen(slaves, "r");
1941 /* Use safe format to check maximal buffer length. */
1942 MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1943 while (fscanf(bond_file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1944 char tmp_str[IF_NAMESIZE + 32];
1945 struct rte_pci_addr pci_addr;
1946 struct mlx5_switch_info info;
1949 /* Process slave interface names in the loop. */
1950 snprintf(tmp_str, sizeof(tmp_str),
1951 "/sys/class/net/%s", ifname);
1952 if (mlx5_get_pci_addr(tmp_str, &pci_addr)) {
1954 "Cannot get PCI address for netdev \"%s\".",
1958 /* Slave interface PCI address match found. */
1959 snprintf(tmp_str, sizeof(tmp_str),
1960 "/sys/class/net/%s/phys_port_name", ifname);
1961 file = fopen(tmp_str, "rb");
1964 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1965 if (fscanf(file, "%32s", tmp_str) == 1)
1966 mlx5_translate_port_name(tmp_str, &info);
1968 /* Only process PF ports. */
1969 if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_LEGACY &&
1970 info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1972 /* Check max bonding member. */
1973 if (info.port_name >= MLX5_BOND_MAX_PORTS) {
1974 DRV_LOG(WARNING, "bonding index out of range, "
1975 "please increase MLX5_BOND_MAX_PORTS: %s",
1980 snprintf(tmp_str, sizeof(tmp_str),
1981 "/sys/class/net/%s/ifindex", ifname);
1982 file = fopen(tmp_str, "rb");
1985 ret = fscanf(file, "%u", &ifindex);
1989 /* Save bonding info. */
1990 strncpy(bond_info->ports[info.port_name].ifname, ifname,
1991 sizeof(bond_info->ports[0].ifname));
1992 bond_info->ports[info.port_name].pci_addr = pci_addr;
1993 bond_info->ports[info.port_name].ifindex = ifindex;
1994 bond_info->n_port++;
1996 * Under socket direct mode, bonding will use the
1997 * system_image_guid as identification.
1998 * After OFED 5.4, the guid is readable (ret >= 0) under sysfs.
1999 * All bonding members should have the same guid even if the driver
2000 * is using the PCIe BDF.
2002 ret = mlx5_get_device_guid(&pci_addr, guid, sizeof(guid));
2006 if (!memcmp(guid, cur_guid, sizeof(guid)) &&
2007 owner == info.port_name &&
2008 (owner != 0 || (owner == 0 &&
2009 !rte_pci_addr_cmp(pci_dev, &pci_addr))))
2010 pf = info.port_name;
2011 } else if (pci_dev->domain == pci_addr.domain &&
2012 pci_dev->bus == pci_addr.bus &&
2013 pci_dev->devid == pci_addr.devid &&
2014 ((pci_dev->function == 0 &&
2015 pci_dev->function + owner == pci_addr.function) ||
2016 (pci_dev->function == owner &&
2017 pci_addr.function == owner)))
2018 pf = info.port_name;
2021 /* Get bond interface info. */
2022 ret = mlx5_sysfs_bond_info(ifindex, &bond_info->ifindex,
2025 DRV_LOG(ERR, "unable to get bond info: %s",
2026 strerror(rte_errno));
2028 DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
2029 ifindex, bond_info->ifindex, bond_info->ifname);
2031 if (owner == 0 && pf != 0) {
2032 DRV_LOG(INFO, "PCIe instance %04x:%02x:%02x.%x isn't bonding owner",
2033 pci_dev->domain, pci_dev->bus, pci_dev->devid,
2040 mlx5_os_config_default(struct mlx5_dev_config *config,
2041 struct mlx5_common_dev_config *cconf)
2043 memset(config, 0, sizeof(*config));
2044 config->mps = MLX5_ARG_UNSET;
2045 config->rx_vec_en = 1;
2046 config->txq_inline_max = MLX5_ARG_UNSET;
2047 config->txq_inline_min = MLX5_ARG_UNSET;
2048 config->txq_inline_mpw = MLX5_ARG_UNSET;
2049 config->txqs_inline = MLX5_ARG_UNSET;
2050 config->vf_nl_en = 1;
2051 config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2052 config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2053 config->mprq.log_min_stride_wqe_size = cconf->devx ?
2054 cconf->hca_attr.log_min_stride_wqe_sz :
2055 MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
2056 config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
2057 config->dv_esw_en = 1;
2058 config->dv_flow_en = 1;
2059 config->decap_en = 1;
2060 config->log_hp_size = MLX5_ARG_UNSET;
2061 config->allow_duplicate_pattern = 1;
2062 config->std_delay_drop = 0;
2063 config->hp_delay_drop = 0;
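/*
 * Both probe paths below use this helper the same way: take the
 * defaults, then override the fields that depend on the probed device.
 * A minimal sketch of that pattern, as used in mlx5_os_pci_probe_pf()
 * (dev_config_vf is derived there from the PCI device ID):
 *
 *	mlx5_os_config_default(&dev_config, &cdev->config);
 *	dev_config.vf = dev_config_vf;
 *	list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
 *					 &dev_config, &eth_da);
 */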
2067 * Register a PCI device within bonding.
2069 * This function spawns Ethernet devices out of a given PCI device and
2070 * bonding owner PF index.
2073 * Pointer to common mlx5 device structure.
2074 * @param[in] req_eth_da
2075 * Requested ethdev device argument.
2076 * @param[in] owner_id
2077 * Requested owner PF port ID within the bonding device, defaults to 0.
2080 * 0 on success, a negative errno value otherwise and rte_errno is set.
2083 mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
2084 struct rte_eth_devargs *req_eth_da,
2087 struct ibv_device **ibv_list;
2089 * Number of found IB devices matching the requested PCI BDF.
2090 * nd != 1 means there are multiple IB devices over the same
2091 * PCI device, i.e. we have both representors and the master.
2093 unsigned int nd = 0;
2095 * Number of found IB device ports. nd = 1 and np = 1..n means
2096 * we have a single multiport IB device, and there may be
2097 * representors attached to some of the found ports.
2099 unsigned int np = 0;
2101 * Number of DPDK Ethernet devices to spawn - either over
2102 * multiple IB devices or over multiple ports of a single IB device.
2103 * Actually this is the number of iterations to perform.
2105 unsigned int ns = 0;
2108 * < 0 - no bonding device (single one)
2109 * >= 0 - bonding device (value is slave PF index)
2112 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
2113 struct mlx5_dev_spawn_data *list = NULL;
2114 struct mlx5_dev_config dev_config;
2115 unsigned int dev_config_vf;
2116 struct rte_eth_devargs eth_da = *req_eth_da;
2117 struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
2118 struct mlx5_bond_info bond_info;
2122 ibv_list = mlx5_glue->get_device_list(&ret);
2124 rte_errno = errno ? errno : ENOSYS;
2125 DRV_LOG(ERR, "Cannot list devices, is ib_uverbs loaded?");
2129 * First scan the list of all Infiniband devices to find
2130 * matching ones, gathering them into the match list.
2132 struct ibv_device *ibv_match[ret + 1];
2133 int nl_route = mlx5_nl_init(NETLINK_ROUTE);
2134 int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
2138 struct rte_pci_addr pci_addr;
2140 DRV_LOG(DEBUG, "Checking device \"%s\"", ibv_list[ret]->name);
2141 bd = mlx5_device_bond_pci_match(ibv_list[ret]->name, &owner_pci,
2142 nl_rdma, owner_id, &bond_info);
2145 * Bonding device detected. Only one match is allowed:
2146 * bonding is supported over a multi-port IB device, so
2147 * there should be no matches on representor PCI
2148 * functions or on non-VF-LAG bonding devices with the
2149 * specified address.
2153 "multiple PCI match on bonding device"
2154 "\"%s\" found", ibv_list[ret]->name);
2159 /* Amend the owner PCI address if an owner PF ID is specified. */
2160 if (eth_da.nb_representor_ports)
2161 owner_pci.function += owner_id;
2163 "PCI information matches for slave %d bonding device \"%s\"",
2164 bd, ibv_list[ret]->name);
2165 ibv_match[nd++] = ibv_list[ret];
2168 /* Bonding device not found. */
2169 if (mlx5_get_pci_addr(ibv_list[ret]->ibdev_path,
2172 if (owner_pci.domain != pci_addr.domain ||
2173 owner_pci.bus != pci_addr.bus ||
2174 owner_pci.devid != pci_addr.devid ||
2175 owner_pci.function != pci_addr.function)
2177 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
2178 ibv_list[ret]->name);
2179 ibv_match[nd++] = ibv_list[ret];
2182 ibv_match[nd] = NULL;
2184 /* No device matches, just complain and bail out. */
2186 "No Verbs device matches PCI device " PCI_PRI_FMT ","
2187 " are kernel drivers loaded?",
2188 owner_pci.domain, owner_pci.bus,
2189 owner_pci.devid, owner_pci.function);
2196 * The single matching device found may have multiple ports.
2197 * Each port may be a representor, so we have to check the port
2198 * number and check for the representors' existence.
2201 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2204 "Cannot get IB device \"%s\" ports number.",
2205 ibv_match[0]->name);
2206 if (bd >= 0 && !np) {
2207 DRV_LOG(ERR, "Cannot get ports for bonding device.");
2213 /* Now we can determine the maximal number of devices to be spawned. */
2214 list = mlx5_malloc(MLX5_MEM_ZERO,
2215 sizeof(struct mlx5_dev_spawn_data) * (np ? np : nd),
2216 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
2218 DRV_LOG(ERR, "Spawn data array allocation failure.");
2223 if (bd >= 0 || np > 1) {
2225 * A single IB device with multiple ports was found;
2226 * it may comprise the E-Switch master device and representors.
2227 * We have to perform identification through the ports.
2229 MLX5_ASSERT(nl_rdma >= 0);
2230 MLX5_ASSERT(ns == 0);
2231 MLX5_ASSERT(nd == 1);
2233 for (i = 1; i <= np; ++i) {
2234 list[ns].bond_info = &bond_info;
2235 list[ns].max_port = np;
2236 list[ns].phys_port = i;
2237 list[ns].phys_dev_name = ibv_match[0]->name;
2238 list[ns].eth_dev = NULL;
2239 list[ns].pci_dev = pci_dev;
2240 list[ns].cdev = cdev;
2241 list[ns].pf_bond = bd;
2242 list[ns].ifindex = mlx5_nl_ifindex(nl_rdma,
2245 if (!list[ns].ifindex) {
2247 * No network interface index found for the
2248 * specified port, it means there is no
2249 * representor on this port. It's OK,
2250 * there can be disabled ports, for example
2251 * if sriov_numvfs < sriov_totalvfs.
2257 ret = mlx5_nl_switch_info(nl_route,
2260 if (ret || (!list[ns].info.representor &&
2261 !list[ns].info.master)) {
2263 * We failed to recognize representors with
2264 * Netlink, let's try to perform the task
2267 ret = mlx5_sysfs_switch_info(list[ns].ifindex,
2270 if (!ret && bd >= 0) {
2271 switch (list[ns].info.name_type) {
2272 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2275 * Force standalone bonding
2276 * device for ROCE LAG
2279 list[ns].info.master = 0;
2280 list[ns].info.representor = 0;
2282 if (list[ns].info.port_name == bd)
2285 case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
2287 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2289 case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
2290 if (list[ns].info.pf_num == bd)
2298 if (!ret && (list[ns].info.representor ^
2299 list[ns].info.master))
2304 "Unable to recognize master/representors on the IB device with multiple ports.");
2311 * The existence of several matching entries (nd > 1) means
2312 * port representors have been instantiated. Neither an existing
2313 * Verbs call nor sysfs entries can tell them apart; this can only
2314 * be done through Netlink calls assuming kernel drivers are
2315 * recent enough to support them.
2317 * In the event of identification failure through Netlink,
2318 * try again through sysfs, then:
2320 * 1. If a single IB device matches (nd == 1) with a single
2321 * port (np = 0/1) and is not a representor, assume
2322 * no switch support.
2324 * 2. Otherwise no safe assumptions can be made;
2325 * complain louder and bail out.
2327 for (i = 0; i != nd; ++i) {
2328 memset(&list[ns].info, 0, sizeof(list[ns].info));
2329 list[ns].bond_info = NULL;
2330 list[ns].max_port = 1;
2331 list[ns].phys_port = 1;
2332 list[ns].phys_dev_name = ibv_match[i]->name;
2333 list[ns].eth_dev = NULL;
2334 list[ns].pci_dev = pci_dev;
2335 list[ns].cdev = cdev;
2336 list[ns].pf_bond = -1;
2337 list[ns].ifindex = 0;
2339 list[ns].ifindex = mlx5_nl_ifindex
2343 if (!list[ns].ifindex) {
2344 char ifname[IF_NAMESIZE];
2347 * Netlink failed; this may happen with an old
2348 * ib_core kernel driver (before 4.16).
2349 * We can assume the driver is old because
2350 * here we are processing single-port IB
2351 * devices. Let's try sysfs to retrieve
2352 * the ifindex. The method works for
2353 * the master device only.
2357 * Multiple devices found: assume
2358 * representors; we cannot distinguish
2359 * master from representor nor retrieve the
2360 * ifindex via sysfs.
2364 ret = mlx5_get_ifname_sysfs
2365 (ibv_match[i]->ibdev_path, ifname);
2368 if_nametoindex(ifname);
2369 if (!list[ns].ifindex) {
2371 * No network interface index found
2372 * for the specified device, it means
2373 * the device is neither a representor
2381 ret = mlx5_nl_switch_info(nl_route,
2384 if (ret || (!list[ns].info.representor &&
2385 !list[ns].info.master)) {
2387 * We failed to recognize representors with
2388 * Netlink, let's try to perform the task
2391 ret = mlx5_sysfs_switch_info(list[ns].ifindex,
2394 if (!ret && (list[ns].info.representor ^
2395 list[ns].info.master)) {
2397 } else if ((nd == 1) &&
2398 !list[ns].info.representor &&
2399 !list[ns].info.master) {
2401 * Single IB device with one physical port and
2402 * attached network device.
2403 * Maybe SR-IOV is not enabled or there is no
2406 DRV_LOG(INFO, "No E-Switch support detected.");
2413 "Unable to recognize master/representors on the multiple IB devices.");
2419 * New kernels may add the switch_id attribute for the case
2420 * where there is no E-Switch and we wrongly recognized the only
2421 * device as the master. Override this if there is a single
2422 * device with a single port and the new device name format is present.
2425 list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
2426 list[0].info.master = 0;
2427 list[0].info.representor = 0;
2432 * Sort the list to probe devices in natural order for the user's
2433 * convenience (i.e. master first, then representors from lowest to highest ID).
2435 qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
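/*
 * The resulting order can be pictured with a comparator along these
 * lines. This is a hedged sketch only, not the actual
 * mlx5_dev_spawn_data_cmp() implementation; using info.port_name as
 * the representor ordering key is an assumption:
 *
 *	static int
 *	spawn_cmp_sketch(const void *a, const void *b)
 *	{
 *		const struct mlx5_dev_spawn_data *sa = a;
 *		const struct mlx5_dev_spawn_data *sb = b;
 *
 *		if (sa->info.master != sb->info.master)
 *			return sb->info.master - sa->info.master;
 *		return sa->info.port_name - sb->info.port_name;
 *	}
 */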
2436 /* Device specific configuration. */
2437 switch (pci_dev->id.device_id) {
2438 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2439 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2440 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2441 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2442 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2443 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2444 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
2451 if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
2452 /* Set devargs default values. */
2453 if (eth_da.nb_mh_controllers == 0) {
2454 eth_da.nb_mh_controllers = 1;
2455 eth_da.mh_controllers[0] = 0;
2457 if (eth_da.nb_ports == 0 && ns > 0) {
2458 if (list[0].pf_bond >= 0 && list[0].info.representor)
2459 DRV_LOG(WARNING, "Representor on Bonding device should use pf#vf# syntax: %s",
2460 pci_dev->device.devargs->args);
2461 eth_da.nb_ports = 1;
2462 eth_da.ports[0] = list[0].info.pf_num;
2464 if (eth_da.nb_representor_ports == 0) {
2465 eth_da.nb_representor_ports = 1;
2466 eth_da.representor_ports[0] = 0;
2469 for (i = 0; i != ns; ++i) {
2472 /* Default configuration. */
2473 mlx5_os_config_default(&dev_config, &cdev->config);
2474 dev_config.vf = dev_config_vf;
2475 list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
2476 &dev_config, ð_da);
2477 if (!list[i].eth_dev) {
2478 if (rte_errno != EBUSY && rte_errno != EEXIST)
2480 /* Device is disabled or already spawned. Ignore it. */
2483 restore = list[i].eth_dev->data->dev_flags;
2484 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2486 * Each representor has a dedicated interrupt vector.
2487 * rte_eth_copy_pci_info() assigns the PF interrupt handle to the
2488 * representor eth_dev object because the representor and the PF
2489 * share the same PCI address.
2490 * Override the representor device with a dedicated
2491 * interrupt handle here.
2492 * The representor interrupt handle is released in mlx5_dev_stop().
2494 if (list[i].info.representor) {
2495 struct rte_intr_handle *intr_handle =
2496 rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
2497 if (intr_handle == NULL) {
2499 "port %u failed to allocate memory for interrupt handler "
2500 "Rx interrupts will not be supported",
2506 list[i].eth_dev->intr_handle = intr_handle;
2508 /* Restore non-PCI flags cleared by the above call. */
2509 list[i].eth_dev->data->dev_flags |= restore;
2510 rte_eth_dev_probing_finish(list[i].eth_dev);
2514 "probe of PCI device " PCI_PRI_FMT " aborted after"
2515 " encountering an error: %s",
2516 owner_pci.domain, owner_pci.bus,
2517 owner_pci.devid, owner_pci.function,
2518 strerror(rte_errno));
2522 if (!list[i].eth_dev)
2524 mlx5_dev_close(list[i].eth_dev);
2525 /* mac_addrs must not be freed because it is part of dev_private. */
2526 list[i].eth_dev->data->mac_addrs = NULL;
2527 claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2529 /* Restore original error. */
2536 * Do the routine cleanup:
2537 * - close opened Netlink sockets
2538 * - free allocated spawn data array
2539 * - free the Infiniband device list
2547 MLX5_ASSERT(ibv_list);
2548 mlx5_glue->free_device_list(ibv_list);
2553 mlx5_os_parse_eth_devargs(struct rte_device *dev,
2554 struct rte_eth_devargs *eth_da)
2558 if (dev->devargs == NULL)
2560 memset(eth_da, 0, sizeof(*eth_da));
2561 /* Parse representor information first from class argument. */
2562 if (dev->devargs->cls_str)
2563 ret = rte_eth_devargs_parse(dev->devargs->cls_str, eth_da);
2565 DRV_LOG(ERR, "failed to parse device arguments: %s",
2566 dev->devargs->cls_str);
2569 if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) {
2570 /* Parse legacy device argument */
2571 ret = rte_eth_devargs_parse(dev->devargs->args, eth_da);
2573 DRV_LOG(ERR, "failed to parse device arguments: %s",
2574 dev->devargs->args);
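/*
 * For illustration, the representor information parsed here typically
 * comes from devargs values such as the following (example values;
 * exact accepted forms are defined by rte_eth_devargs_parse()):
 *
 *	representor=vf[0-3]		VF representors 0..3
 *	representor=pf[0-1]vf[0-3]	VF representors on bonding PFs 0-1
 *
 * The same value may arrive either through the class-specific string
 * (cls_str) or through the legacy argument list (args), as handled
 * above.
 */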
2582 * Callback to register a PCI device.
2584 * This function spawns Ethernet devices out of a given PCI device.
2587 * Pointer to common mlx5 device structure.
2590 * 0 on success, a negative errno value otherwise and rte_errno is set.
2593 mlx5_os_pci_probe(struct mlx5_common_device *cdev)
2595 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
2596 struct rte_eth_devargs eth_da = { .nb_ports = 0 };
2600 ret = mlx5_os_parse_eth_devargs(cdev->dev, &eth_da);
2604 if (eth_da.nb_ports > 0) {
2605 /* Iterate over all ports if the devargs pf is a range: "pf[0-1]vf[...]". */
2606 for (p = 0; p < eth_da.nb_ports; p++) {
2607 ret = mlx5_os_pci_probe_pf(cdev, &eth_da,
2613 DRV_LOG(ERR, "Probe of PCI device " PCI_PRI_FMT " "
2614 "aborted due to prodding failure of PF %u",
2615 pci_dev->addr.domain, pci_dev->addr.bus,
2616 pci_dev->addr.devid, pci_dev->addr.function,
2618 mlx5_net_remove(cdev);
2621 ret = mlx5_os_pci_probe_pf(cdev, &eth_da, 0);
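/*
 * Example of the pf range handling above: a devargs string such as
 * representor=pf[0-1]vf[0-1] yields one call per parsed PF owner
 * (assumed to be taken from eth_da.ports[]), e.g.
 *
 *	mlx5_os_pci_probe_pf(cdev, &eth_da, 0);
 *	mlx5_os_pci_probe_pf(cdev, &eth_da, 1);
 *
 * whereas a devargs string without a pf range falls through to the
 * single owner-0 call just above.
 */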
2626 /* Probe a single SF device on auxiliary bus, no representor support. */
2628 mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
2630 struct rte_eth_devargs eth_da = { .nb_ports = 0 };
2631 struct mlx5_dev_config config;
2632 struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
2633 struct rte_device *dev = cdev->dev;
2634 struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
2635 struct rte_eth_dev *eth_dev;
2638 /* Parse ethdev devargs. */
2639 ret = mlx5_os_parse_eth_devargs(dev, &eth_da);
2642 /* Set default config data. */
2643 mlx5_os_config_default(&config, &cdev->config);
2645 /* Init spawn data. */
2647 spawn.phys_port = 1;
2648 spawn.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
2649 ret = mlx5_auxiliary_get_ifindex(dev->name);
2651 DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
2654 spawn.ifindex = ret;
2657 eth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);
2658 if (eth_dev == NULL)
2661 eth_dev->intr_handle = adev->intr_handle;
2662 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2663 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2664 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
2665 eth_dev->data->numa_node = dev->numa_node;
2667 rte_eth_dev_probing_finish(eth_dev);
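/*
 * A typical invocation reaching this path looks like the following
 * (illustrative sub-function name; the auxiliary device naming is
 * owned by the mlx5_core kernel driver):
 *
 *	dpdk-testpmd -a auxiliary:mlx5_core.sf.2 -- -i
 *
 * A sub-function always maps to exactly one ethdev here, with no
 * representors.
 */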
2672 * Net class driver callback to probe a device.
2674 * This function probes PCI bus device(s) or a single SF on the auxiliary bus.
2677 * Pointer to the common mlx5 device.
2680 * 0 on success, a negative errno value otherwise and rte_errno is set.
2683 mlx5_os_net_probe(struct mlx5_common_device *cdev)
2687 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2688 mlx5_pmd_socket_init();
2689 ret = mlx5_init_once();
2691 DRV_LOG(ERR, "Unable to init PMD global data: %s",
2692 strerror(rte_errno));
2695 if (mlx5_dev_is_pci(cdev->dev))
2696 return mlx5_os_pci_probe(cdev);
2698 return mlx5_os_auxiliary_probe(cdev);
2702 * Cleanup resources when the last device is closed.
2705 mlx5_os_net_cleanup(void)
2707 mlx5_pmd_socket_uninit();
2711 * Install shared asynchronous device events handler.
2712 * This function is implemented to support event sharing
2713 * between multiple ports of a single IB device.
2716 * Pointer to mlx5_dev_ctx_shared object.
2719 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
2723 struct ibv_context *ctx = sh->cdev->ctx;
2725 sh->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
2726 if (sh->intr_handle == NULL) {
2727 DRV_LOG(ERR, "Failed to allocate intr_handle");
2731 rte_intr_fd_set(sh->intr_handle, -1);
2733 flags = fcntl(ctx->async_fd, F_GETFL);
2734 ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
2736 DRV_LOG(INFO, "failed to change file descriptor async event"
2739 rte_intr_fd_set(sh->intr_handle, ctx->async_fd);
2740 rte_intr_type_set(sh->intr_handle, RTE_INTR_HANDLE_EXT);
2741 if (rte_intr_callback_register(sh->intr_handle,
2742 mlx5_dev_interrupt_handler, sh)) {
2743 DRV_LOG(INFO, "Failed to install the shared interrupt.");
2744 rte_intr_fd_set(sh->intr_handle, -1);
2748 #ifdef HAVE_IBV_DEVX_ASYNC
2749 sh->intr_handle_devx =
2750 rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
2751 if (!sh->intr_handle_devx) {
2752 DRV_LOG(ERR, "Failed to allocate intr_handle");
2756 rte_intr_fd_set(sh->intr_handle_devx, -1);
2757 sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
2758 struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
2760 DRV_LOG(INFO, "failed to allocate devx_comp.");
2763 flags = fcntl(devx_comp->fd, F_GETFL);
2764 ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
2766 DRV_LOG(INFO, "failed to change file descriptor"
2770 rte_intr_fd_set(sh->intr_handle_devx, devx_comp->fd);
2771 rte_intr_type_set(sh->intr_handle_devx,
2772 RTE_INTR_HANDLE_EXT);
2773 if (rte_intr_callback_register(sh->intr_handle_devx,
2774 mlx5_dev_interrupt_handler_devx, sh)) {
2775 DRV_LOG(INFO, "Failed to install the devx shared"
2777 rte_intr_fd_set(sh->intr_handle_devx, -1);
2779 #endif /* HAVE_IBV_DEVX_ASYNC */
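/*
 * Both registrations above follow the same pattern; a condensed sketch
 * (error handling omitted, fd stands for either ctx->async_fd or the
 * DevX completion channel fd):
 *
 *	flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *	rte_intr_fd_set(handle, fd);
 *	rte_intr_type_set(handle, RTE_INTR_HANDLE_EXT);
 *	rte_intr_callback_register(handle, callback, sh);
 */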
2784 * Uninstall shared asynchronous device events handler.
2785 * This function is implemented to support event sharing
2786 * between multiple ports of a single IB device.
2789 * Pointer to mlx5_dev_ctx_shared object.
2792 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
2794 if (rte_intr_fd_get(sh->intr_handle) >= 0)
2795 mlx5_intr_callback_unregister(sh->intr_handle,
2796 mlx5_dev_interrupt_handler, sh);
2797 rte_intr_instance_free(sh->intr_handle);
2798 #ifdef HAVE_IBV_DEVX_ASYNC
2799 if (rte_intr_fd_get(sh->intr_handle_devx) >= 0)
2800 rte_intr_callback_unregister(sh->intr_handle_devx,
2801 mlx5_dev_interrupt_handler_devx, sh);
2802 rte_intr_instance_free(sh->intr_handle_devx);
2804 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
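/*
 * Teardown mirrors the install order above: unregister the callback
 * while the handle still holds a valid fd, free the interrupt handle
 * instance, and finally destroy the DevX command completion channel.
 */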
2809 * Read statistics by a named counter.
2812 * Pointer to the private device data structure.
2813 * @param[in] ctr_name
2814 * Pointer to the name of the statistic counter to read.
2816 * Pointer to the read statistic value.
2818 * 0 on success and stat is valid, 1 if failed to read the value
2823 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2829 if (priv->q_counters != NULL &&
2830 strcmp(ctr_name, "out_of_buffer") == 0)
2831 return mlx5_devx_cmd_queue_counter_query
2832 (priv->q_counters, 0, (uint32_t *)stat);
2833 MKSTR(path, "%s/ports/%d/hw_counters/%s",
2834 priv->sh->ibdev_path,
2837 fd = open(path, O_RDONLY);
2839 * In switchdev mode the file location is not per port,
2840 * but rather <ibdev_path>/hw_counters/<file_name>.
2843 MKSTR(path1, "%s/hw_counters/%s",
2844 priv->sh->ibdev_path,
2846 fd = open(path1, O_RDONLY);
2849 char buf[21] = {'\0'};
2850 ssize_t n = read(fd, buf, sizeof(buf));
2854 *stat = strtoull(buf, NULL, 10);
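/*
 * For the "out_of_buffer" counter, for example, the two locations tried
 * above are (illustrative ibdev_path):
 *
 *	/sys/class/infiniband/mlx5_0/ports/1/hw_counters/out_of_buffer
 *	/sys/class/infiniband/mlx5_0/hw_counters/out_of_buffer
 *
 * Whichever file opens first is read as a decimal string and converted
 * with strtoull().
 */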
2864 * Remove a MAC address from the device.
2867 * Pointer to Ethernet device structure.
2869 * MAC address index.
2872 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2874 struct mlx5_priv *priv = dev->data->dev_private;
2875 const int vf = priv->config.vf;
2878 mlx5_nl_mac_addr_remove(priv->nl_socket_route,
2879 mlx5_ifindex(dev), priv->mac_own,
2880 &dev->data->mac_addrs[index], index);
2884 * Add a MAC address to the device.
2887 * Pointer to Ethernet device structure.
2889 * MAC address to register.
2891 * MAC address index.
2894 * 0 on success, a negative errno value otherwise
2897 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2900 struct mlx5_priv *priv = dev->data->dev_private;
2901 const int vf = priv->config.vf;
2905 ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
2906 mlx5_ifindex(dev), priv->mac_own,
2912 * Modify a VF MAC address
2915 * Pointer to device private data.
2917 * MAC address to modify into.
2919 * Net device interface index
2924 * 0 on success, a negative errno value otherwise
2927 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
2928 unsigned int iface_idx,
2929 struct rte_ether_addr *mac_addr,
2932 return mlx5_nl_vf_mac_addr_modify
2933 (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
2937 * Set device promiscuous mode
2940 * Pointer to Ethernet device structure.
2942 * 0 - promiscuous is disabled, otherwise - enabled
2945 * 0 on success, a negative error value otherwise
2948 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
2950 struct mlx5_priv *priv = dev->data->dev_private;
2952 return mlx5_nl_promisc(priv->nl_socket_route,
2953 mlx5_ifindex(dev), !!enable);
2957 * Set device allmulticast mode
2960 * Pointer to Ethernet device structure.
2962 * 0 - all multicast is disabled, otherwise - enabled
2965 * 0 on success, a negative error value otherwise
2968 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
2970 struct mlx5_priv *priv = dev->data->dev_private;
2972 return mlx5_nl_allmulti(priv->nl_socket_route,
2973 mlx5_ifindex(dev), !!enable);
2977 * Flush device MAC addresses
2980 * Pointer to Ethernet device structure.
2984 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
2986 struct mlx5_priv *priv = dev->data->dev_private;
2988 mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
2989 dev->data->mac_addrs,
2990 MLX5_MAX_MAC_ADDRESSES, priv->mac_own);