1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
13 #include <linux/rtnetlink.h>
14 #include <linux/sockios.h>
15 #include <linux/ethtool.h>
18 #include <rte_malloc.h>
19 #include <ethdev_driver.h>
20 #include <ethdev_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_common.h>
24 #include <rte_kvargs.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_string_fns.h>
28 #include <rte_alarm.h>
29 #include <rte_eal_paging.h>
31 #include <mlx5_glue.h>
32 #include <mlx5_devx_cmds.h>
33 #include <mlx5_common.h>
34 #include <mlx5_common_mp.h>
35 #include <mlx5_common_mr.h>
36 #include <mlx5_malloc.h>
38 #include "mlx5_defs.h"
40 #include "mlx5_common_os.h"
41 #include "mlx5_utils.h"
42 #include "mlx5_rxtx.h"
45 #include "mlx5_autoconf.h"
47 #include "mlx5_flow.h"
48 #include "rte_pmd_mlx5.h"
49 #include "mlx5_verbs.h"
51 #include "mlx5_devx.h"
53 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
55 #ifndef HAVE_IBV_MLX5_MOD_MPW
56 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
57 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
60 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
61 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
64 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
66 /* Spinlock for mlx5_shared_data allocation. */
67 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
69 /* Process local data for secondary processes. */
70 static struct mlx5_local_data mlx5_local_data;
72 /* rte flow indexed pool configuration. */
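/*
 * Editorial note (inferred from the entries below, not in the original file):
 * three pools are configured - control flows ("ctl_flow_ipool"), application
 * rte_flow rules ("rte_flow_ipool") and MCP flows ("mcp_flow_ipool"). Only
 * the rte_flow pool enables a per-lcore cache (1 << 14 entries), since
 * application flow insertion is the allocation hot path.
 */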
73 static struct mlx5_indexed_pool_config icfg[] = {
75 .size = sizeof(struct rte_flow),
79 .malloc = mlx5_malloc,
82 .type = "ctl_flow_ipool",
85 .size = sizeof(struct rte_flow),
91 .malloc = mlx5_malloc,
93 .per_core_cache = 1 << 14,
94 .type = "rte_flow_ipool",
97 .size = sizeof(struct rte_flow),
103 .malloc = mlx5_malloc,
106 .type = "mcp_flow_ipool",
111 * Set the completion channel file descriptor interrupt as non-blocking.
114 * Pointer to RQ channel object, which includes the channel fd
117 * The file descriptor (representing the interrupt) used in this channel.
120 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
123 mlx5_os_set_nonblock_channel_fd(int fd)
127 flags = fcntl(fd, F_GETFL);
128 return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
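/*
 * Usage sketch (illustrative, not part of the original file): a caller owning
 * an interrupt/completion channel would pass its fd, e.g.
 *
 *	if (mlx5_os_set_nonblock_channel_fd(channel->fd))
 *		DRV_LOG(ERR, "Cannot set the channel fd to non-blocking");
 *
 * Note that the F_GETFL result is assumed to succeed here; a defensive caller
 * may want to check it for -1 before OR-ing in O_NONBLOCK.
 */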
132 * Get mlx5 device attributes. The glue function query_device_ex() is called
133 * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5
134 * device attributes are then filled in from the glue out parameter.
137 * Pointer to ibv context.
140 * Pointer to mlx5 device attributes.
143 * 0 on success, a non-zero error number otherwise.
146 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
149 struct ibv_device_attr_ex attr_ex;
150 memset(device_attr, 0, sizeof(*device_attr));
151 err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
155 device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
156 device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
157 device_attr->max_sge = attr_ex.orig_attr.max_sge;
158 device_attr->max_cq = attr_ex.orig_attr.max_cq;
159 device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
160 device_attr->max_mr = attr_ex.orig_attr.max_mr;
161 device_attr->max_pd = attr_ex.orig_attr.max_pd;
162 device_attr->max_qp = attr_ex.orig_attr.max_qp;
163 device_attr->max_srq = attr_ex.orig_attr.max_srq;
164 device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
165 device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
166 device_attr->max_rwq_indirection_table_size =
167 attr_ex.rss_caps.max_rwq_indirection_table_size;
168 device_attr->max_tso = attr_ex.tso_caps.max_tso;
169 device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
171 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
172 err = mlx5_glue->dv_query_device(ctx, &dv_attr);
176 device_attr->flags = dv_attr.flags;
177 device_attr->comp_mask = dv_attr.comp_mask;
178 #ifdef HAVE_IBV_MLX5_MOD_SWP
179 device_attr->sw_parsing_offloads =
180 dv_attr.sw_parsing_caps.sw_parsing_offloads;
182 device_attr->min_single_stride_log_num_of_bytes =
183 dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
184 device_attr->max_single_stride_log_num_of_bytes =
185 dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
186 device_attr->min_single_wqe_log_num_of_strides =
187 dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
188 device_attr->max_single_wqe_log_num_of_strides =
189 dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
190 device_attr->stride_supported_qpts =
191 dv_attr.striding_rq_caps.supported_qpts;
192 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
193 device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
195 strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
196 sizeof(device_attr->fw_ver));
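/*
 * Editorial note: the attributes gathered above are kept in the shared device
 * context (sh->device_attr) and drive the later capability checks in
 * mlx5_dev_spawn(), e.g. checksum offload, indirection table size, VLAN/FCS
 * stripping and TSO support.
 */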
202 * Verbs callback to allocate memory. This function should allocate the space
203 * according to the size provided, residing inside a huge page.
204 * Please note that all allocations must respect the alignment from libmlx5
205 * (i.e. currently rte_mem_page_size()).
208 * The size in bytes of the memory to allocate.
210 * A pointer to the callback data.
213 * Allocated buffer, NULL otherwise and rte_errno is set.
216 mlx5_alloc_verbs_buf(size_t size, void *data)
218 struct mlx5_dev_ctx_shared *sh = data;
220 size_t alignment = rte_mem_page_size();
221 if (alignment == (size_t)-1) {
222 DRV_LOG(ERR, "Failed to get mem page size");
227 MLX5_ASSERT(data != NULL);
228 ret = mlx5_malloc(0, size, alignment, sh->numa_node);
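/*
 * Editorial note: this allocator (together with mlx5_free_verbs_buf() below)
 * is handed over to libmlx5 in mlx5_dev_spawn() via
 * mlx5_glue->dv_set_context_attr(sh->ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, ...),
 * so data-plane buffers requested by rdma-core come from the PMD allocator.
 */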
235 * Detect whether misc5 matching is supported.
238 * Device private data pointer
240 #ifdef HAVE_MLX5DV_DR
242 __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
244 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
245 /* Dummy VxLAN matcher to detect the rdma-core misc5 capability.
246 * Case: IPv4 ---> UDP ---> VxLAN ---> VNI
249 struct mlx5_flow_dv_match_params matcher_mask;
254 uint32_t *tunnel_header_m;
255 struct mlx5dv_flow_matcher_attr dv_attr;
257 memset(&matcher_mask, 0, sizeof(matcher_mask));
258 matcher_mask.size = sizeof(matcher_mask.buf);
259 match_m = matcher_mask.buf;
260 headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
261 misc5_m = MLX5_ADDR_OF(fte_match_param,
262 match_m, misc_parameters_5);
263 tunnel_header_m = (uint32_t *)
264 MLX5_ADDR_OF(fte_match_set_misc5,
265 misc5_m, tunnel_header_1);
266 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
267 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 4);
268 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
269 *tunnel_header_m = 0xffffff;
271 tbl = mlx5_glue->dr_create_flow_tbl(priv->sh->rx_domain, 1);
273 DRV_LOG(INFO, "No SW steering support");
276 dv_attr.type = IBV_FLOW_ATTR_NORMAL,
277 dv_attr.match_mask = (void *)&matcher_mask,
278 dv_attr.match_criteria_enable =
279 (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
280 (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT);
281 dv_attr.priority = 3;
282 #ifdef HAVE_MLX5DV_DR_ESWITCH
284 if (priv->config.dv_esw_en) {
285 /* FDB enabled reg_c_0 */
286 dv_attr.match_criteria_enable |=
287 (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
288 misc2_m = MLX5_ADDR_OF(fte_match_param,
289 match_m, misc_parameters_2);
290 MLX5_SET(fte_match_set_misc2, misc2_m,
291 metadata_reg_c_0, 0xffff);
294 matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->ctx,
297 priv->sh->misc5_cap = 1;
298 mlx5_glue->dv_destroy_flow_matcher(matcher);
300 mlx5_glue->dr_destroy_flow_tbl(tbl);
308 * Verbs callback to free a memory.
311 * A pointer to the memory to free.
313 * A pointer to the callback data.
316 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
318 MLX5_ASSERT(data != NULL);
323 * Initialize DR-related data within the private structure.
324 * The routine checks the reference counter and does the actual
325 * resource creation/initialization only if the counter is zero.
328 * Pointer to the private device data structure.
331 * Zero on success, positive error code otherwise.
334 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
336 struct mlx5_dev_ctx_shared *sh = priv->sh;
337 char s[MLX5_HLIST_NAMESIZE] __rte_unused;
340 MLX5_ASSERT(sh && sh->refcnt);
343 err = mlx5_alloc_table_hash_list(priv);
346 /* The resources below are only valid with DV support. */
347 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
348 /* Init port id action mlx5 list. */
349 snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
350 mlx5_list_create(&sh->port_id_action_list, s, 0, sh,
351 flow_dv_port_id_create_cb,
352 flow_dv_port_id_match_cb,
353 flow_dv_port_id_remove_cb);
354 /* Init push vlan action mlx5 list. */
355 snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
356 mlx5_list_create(&sh->push_vlan_action_list, s, 0, sh,
357 flow_dv_push_vlan_create_cb,
358 flow_dv_push_vlan_match_cb,
359 flow_dv_push_vlan_remove_cb);
360 /* Init sample action mlx5 list. */
361 snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
362 mlx5_list_create(&sh->sample_action_list, s, 0, sh,
363 flow_dv_sample_create_cb,
364 flow_dv_sample_match_cb,
365 flow_dv_sample_remove_cb);
366 /* Init dest array action mlx5 list. */
367 snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
368 mlx5_list_create(&sh->dest_array_list, s, 0, sh,
369 flow_dv_dest_array_create_cb,
370 flow_dv_dest_array_match_cb,
371 flow_dv_dest_array_remove_cb);
372 /* Create tags hash list table. */
373 snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
374 sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
375 MLX5_HLIST_WRITE_MOST,
376 flow_dv_tag_create_cb,
377 flow_dv_tag_match_cb,
378 flow_dv_tag_remove_cb);
379 if (!sh->tag_table) {
380 DRV_LOG(ERR, "tags with hash creation failed.");
384 sh->tag_table->ctx = sh;
385 snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
386 sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
387 0, MLX5_HLIST_WRITE_MOST |
388 MLX5_HLIST_DIRECT_KEY,
389 flow_dv_modify_create_cb,
390 flow_dv_modify_match_cb,
391 flow_dv_modify_remove_cb);
392 if (!sh->modify_cmds) {
393 DRV_LOG(ERR, "hdr modify hash creation failed");
397 sh->modify_cmds->ctx = sh;
398 snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
399 sh->encaps_decaps = mlx5_hlist_create(s,
400 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
401 0, MLX5_HLIST_DIRECT_KEY |
402 MLX5_HLIST_WRITE_MOST,
403 flow_dv_encap_decap_create_cb,
404 flow_dv_encap_decap_match_cb,
405 flow_dv_encap_decap_remove_cb);
406 if (!sh->encaps_decaps) {
407 DRV_LOG(ERR, "encap decap hash creation failed");
411 sh->encaps_decaps->ctx = sh;
413 #ifdef HAVE_MLX5DV_DR
416 /* Reference counter is zero, we should initialize structures. */
417 domain = mlx5_glue->dr_create_domain(sh->ctx,
418 MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
420 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
424 sh->rx_domain = domain;
425 domain = mlx5_glue->dr_create_domain(sh->ctx,
426 MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
428 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
432 sh->tx_domain = domain;
433 #ifdef HAVE_MLX5DV_DR_ESWITCH
434 if (priv->config.dv_esw_en) {
435 domain = mlx5_glue->dr_create_domain
436 (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
438 DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
442 sh->fdb_domain = domain;
445 * The drop action is just a dummy placeholder in rdma-core. It
446 * does not belong to any domain, has no attributes, and can be
447 * shared by the entire device.
449 sh->dr_drop_action = mlx5_glue->dr_create_flow_action_drop();
450 if (!sh->dr_drop_action) {
451 DRV_LOG(ERR, "mlx5dv_dr_create_flow_action_drop failed");
457 err = mlx5_alloc_tunnel_hub(sh);
459 DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
462 if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
463 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
464 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
466 mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
468 sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
469 if (!priv->config.allow_duplicate_pattern) {
470 #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
471 DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
473 mlx5_glue->dr_allow_duplicate_rules(sh->rx_domain, 0);
474 mlx5_glue->dr_allow_duplicate_rules(sh->tx_domain, 0);
476 mlx5_glue->dr_allow_duplicate_rules(sh->fdb_domain, 0);
479 __mlx5_discovery_misc5_cap(priv);
480 #endif /* HAVE_MLX5DV_DR */
481 sh->default_miss_action =
482 mlx5_glue->dr_create_flow_action_default_miss();
483 if (!sh->default_miss_action)
484 DRV_LOG(WARNING, "Default miss action is not supported.");
487 /* Rollback the created objects. */
489 mlx5_glue->dr_destroy_domain(sh->rx_domain);
490 sh->rx_domain = NULL;
493 mlx5_glue->dr_destroy_domain(sh->tx_domain);
494 sh->tx_domain = NULL;
496 if (sh->fdb_domain) {
497 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
498 sh->fdb_domain = NULL;
500 if (sh->dr_drop_action) {
501 mlx5_glue->destroy_flow_action(sh->dr_drop_action);
502 sh->dr_drop_action = NULL;
504 if (sh->pop_vlan_action) {
505 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
506 sh->pop_vlan_action = NULL;
508 if (sh->encaps_decaps) {
509 mlx5_hlist_destroy(sh->encaps_decaps);
510 sh->encaps_decaps = NULL;
512 if (sh->modify_cmds) {
513 mlx5_hlist_destroy(sh->modify_cmds);
514 sh->modify_cmds = NULL;
517 /* Tags should have been destroyed together with the flows before this point. */
518 mlx5_hlist_destroy(sh->tag_table);
519 sh->tag_table = NULL;
521 if (sh->tunnel_hub) {
522 mlx5_release_tunnel_hub(sh, priv->dev_port);
523 sh->tunnel_hub = NULL;
525 mlx5_free_table_hash_list(priv);
530 * Destroy DR related data within private structure.
533 * Pointer to the private device data structure.
536 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
538 struct mlx5_dev_ctx_shared *sh = priv->sh;
540 MLX5_ASSERT(sh && sh->refcnt);
543 #ifdef HAVE_MLX5DV_DR
545 mlx5_glue->dr_destroy_domain(sh->rx_domain);
546 sh->rx_domain = NULL;
549 mlx5_glue->dr_destroy_domain(sh->tx_domain);
550 sh->tx_domain = NULL;
552 #ifdef HAVE_MLX5DV_DR_ESWITCH
553 if (sh->fdb_domain) {
554 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
555 sh->fdb_domain = NULL;
557 if (sh->dr_drop_action) {
558 mlx5_glue->destroy_flow_action(sh->dr_drop_action);
559 sh->dr_drop_action = NULL;
562 if (sh->pop_vlan_action) {
563 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
564 sh->pop_vlan_action = NULL;
566 #endif /* HAVE_MLX5DV_DR */
567 if (sh->default_miss_action)
568 mlx5_glue->destroy_flow_action
569 (sh->default_miss_action);
570 if (sh->encaps_decaps) {
571 mlx5_hlist_destroy(sh->encaps_decaps);
572 sh->encaps_decaps = NULL;
574 if (sh->modify_cmds) {
575 mlx5_hlist_destroy(sh->modify_cmds);
576 sh->modify_cmds = NULL;
579 /* Tags should have been destroyed together with the flows before this point. */
580 mlx5_hlist_destroy(sh->tag_table);
581 sh->tag_table = NULL;
583 if (sh->tunnel_hub) {
584 mlx5_release_tunnel_hub(sh, priv->dev_port);
585 sh->tunnel_hub = NULL;
587 mlx5_list_destroy(&sh->port_id_action_list);
588 mlx5_list_destroy(&sh->push_vlan_action_list);
589 mlx5_free_table_hash_list(priv);
593 * Initialize shared data between the primary and secondary processes.
595 * A memzone is reserved by the primary process; secondary processes attach to it.
599 * 0 on success, a negative errno value otherwise and rte_errno is set.
602 mlx5_init_shared_data(void)
604 const struct rte_memzone *mz;
607 rte_spinlock_lock(&mlx5_shared_data_lock);
608 if (mlx5_shared_data == NULL) {
609 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
610 /* Allocate shared memory. */
611 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
612 sizeof(*mlx5_shared_data),
616 "Cannot allocate mlx5 shared data");
620 mlx5_shared_data = mz->addr;
621 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
622 rte_spinlock_init(&mlx5_shared_data->lock);
624 /* Lookup allocated shared memory. */
625 mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
628 "Cannot attach mlx5 shared data");
632 mlx5_shared_data = mz->addr;
633 memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
637 rte_spinlock_unlock(&mlx5_shared_data_lock);
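/*
 * Editorial note: the memzone named by MZ_MLX5_PMD_SHARED_DATA is reserved
 * exactly once by the primary process; secondary processes only look it up,
 * so all processes end up mapping the same mlx5_shared_data instance.
 */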
642 * PMD global initialization.
644 * Independent of any individual device, this function initializes global
645 * per-PMD data structures, distinguishing primary and secondary processes.
646 * Hence, the initialization is performed once per process.
649 * 0 on success, a negative errno value otherwise and rte_errno is set.
654 struct mlx5_shared_data *sd;
655 struct mlx5_local_data *ld = &mlx5_local_data;
658 if (mlx5_init_shared_data())
660 sd = mlx5_shared_data;
662 rte_spinlock_lock(&sd->lock);
663 switch (rte_eal_process_type()) {
664 case RTE_PROC_PRIMARY:
667 LIST_INIT(&sd->mem_event_cb_list);
668 rte_rwlock_init(&sd->mem_event_rwlock);
669 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
670 mlx5_mr_mem_event_cb, NULL);
671 ret = mlx5_mp_init_primary(MLX5_MP_NAME,
672 mlx5_mp_os_primary_handle);
675 sd->init_done = true;
677 case RTE_PROC_SECONDARY:
680 ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
681 mlx5_mp_os_secondary_handle);
685 ld->init_done = true;
691 rte_spinlock_unlock(&sd->lock);
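/*
 * Editorial note: mlx5_init_once() is called from the probe path (see
 * mlx5_os_pci_probe_pf() below) before any device scanning, so the shared
 * data, the memory event callback and the multi-process IPC channel are
 * ready before the first port is spawned.
 */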
696 * Create the Tx queue DevX/Verbs object.
699 * Pointer to Ethernet device.
701 * Queue index in DPDK Tx queue array.
704 * 0 on success, a negative errno value otherwise and rte_errno is set.
707 mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
709 struct mlx5_priv *priv = dev->data->dev_private;
710 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
711 struct mlx5_txq_ctrl *txq_ctrl =
712 container_of(txq_data, struct mlx5_txq_ctrl, txq);
714 if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
715 return mlx5_txq_devx_obj_new(dev, idx);
716 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
717 if (!priv->config.dv_esw_en)
718 return mlx5_txq_devx_obj_new(dev, idx);
720 return mlx5_txq_ibv_obj_new(dev, idx);
724 * Release a Tx DevX/Verbs queue object.
727 * DevX/Verbs Tx queue object.
730 mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
732 if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
733 mlx5_txq_devx_obj_release(txq_obj);
736 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
737 if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
738 mlx5_txq_devx_obj_release(txq_obj);
742 mlx5_txq_ibv_obj_release(txq_obj);
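/*
 * Editorial note: both wrappers above follow the same selection rule -
 * hairpin queues always use the DevX object, while regular queues use DevX
 * only when the UAR offset is available (HAVE_MLX5DV_DEVX_UAR_OFFSET) and
 * E-Switch is disabled; otherwise the Verbs implementation is used.
 */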
746 * Detect and configure the DV flow counter mode.
749 * Pointer to rte_eth_dev structure.
753 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
755 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
756 struct mlx5_priv *priv = dev->data->dev_private;
757 struct mlx5_dev_ctx_shared *sh = priv->sh;
760 #ifndef HAVE_IBV_DEVX_ASYNC
764 if (!priv->config.devx || !priv->config.dv_flow_en ||
765 !priv->config.hca_attr.flow_counters_dump ||
766 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
767 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
771 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
772 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
773 priv->config.hca_attr.flow_counters_dump,
774 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
775 /* Initialize fallback mode only on the port that initializes sh. */
777 sh->cmng.counter_fallback = fallback;
778 else if (fallback != sh->cmng.counter_fallback)
779 DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
780 "with others:%d.", PORT_ID(priv), fallback);
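/*
 * Editorial note: fallback counter management is selected when DevX async
 * support, DV flow, flow counter dumping, the bulk allocation capability
 * (bit 0x4 of flow_counter_bulk_alloc_bitmap) or the counter offset
 * discovery is unavailable (see the conditions above); a warning is emitted
 * if sibling ports sharing the same sh disagree on the mode.
 */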
785 mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
787 struct mlx5_priv *priv = dev->data->dev_private;
788 void *ctx = priv->sh->ctx;
790 priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
791 if (!priv->q_counters) {
792 struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
795 DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
796 "by DevX - fall-back to use the kernel driver global "
797 "queue counter.", dev->data->port_id);
798 /* Create a WQ via the kernel driver and query its queue counter ID. */
800 wq = mlx5_glue->create_wq(ctx,
801 &(struct ibv_wq_init_attr){
802 .wq_type = IBV_WQT_RQ,
809 /* The counter is assigned only in the RDY state. */
810 int ret = mlx5_glue->modify_wq(wq,
811 &(struct ibv_wq_attr){
812 .attr_mask = IBV_WQ_ATTR_STATE,
813 .wq_state = IBV_WQS_RDY,
817 mlx5_devx_cmd_wq_query(wq,
818 &priv->counter_set_id);
819 claim_zero(mlx5_glue->destroy_wq(wq));
821 claim_zero(mlx5_glue->destroy_cq(cq));
824 priv->counter_set_id = priv->q_counters->id;
826 if (priv->counter_set_id == 0)
827 DRV_LOG(INFO, "Part of the port %d statistics will not be "
828 "available.", dev->data->port_id);
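/*
 * Editorial note: when the DevX queue counter cannot be allocated, the code
 * above falls back to the kernel driver's global queue counter, discovered by
 * creating a throw-away CQ/WQ pair and querying its counter ID. If neither
 * method yields a counter (counter_set_id stays 0), part of the port
 * statistics will not be available, as logged above.
 */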
832 * Check whether the representor spawn info matches the devargs.
835 * Verbs device parameters (name, port, switch_info) to spawn.
837 * Device devargs to probe.
843 mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
844 struct rte_eth_devargs *eth_da)
846 struct mlx5_switch_info *switch_info = &spawn->info;
849 uint16_t repr_id = mlx5_representor_id_encode(switch_info,
852 switch (eth_da->type) {
853 case RTE_ETH_REPRESENTOR_SF:
854 if (!(spawn->info.port_name == -1 &&
855 switch_info->name_type ==
856 MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
857 switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF) {
862 case RTE_ETH_REPRESENTOR_VF:
863 /* Allow HPF representor index -1 as an exception. */
864 if (!(spawn->info.port_name == -1 &&
865 switch_info->name_type ==
866 MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
867 switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF) {
872 case RTE_ETH_REPRESENTOR_NONE:
877 DRV_LOG(ERR, "unsupported representor type");
880 /* Check representor ID: */
881 for (p = 0; p < eth_da->nb_ports; ++p) {
882 if (spawn->pf_bond < 0) {
883 /* For non-LAG mode, allow and ignore pf. */
884 switch_info->pf_num = eth_da->ports[p];
885 repr_id = mlx5_representor_id_encode(switch_info,
888 for (f = 0; f < eth_da->nb_representor_ports; ++f) {
889 id = MLX5_REPRESENTOR_ID
890 (eth_da->ports[p], eth_da->type,
891 eth_da->representor_ports[f]);
902 * Spawn an Ethernet device from Verbs information.
905 * Backing DPDK device.
907 * Verbs device parameters (name, port, switch_info) to spawn.
909 * Device configuration parameters.
914 * A valid Ethernet device object on success, NULL otherwise and rte_errno
915 * is set. The following errors are defined:
917 * EBUSY: device is not supposed to be spawned.
918 * EEXIST: device is already spawned
920 static struct rte_eth_dev *
921 mlx5_dev_spawn(struct rte_device *dpdk_dev,
922 struct mlx5_dev_spawn_data *spawn,
923 struct mlx5_dev_config *config,
924 struct rte_eth_devargs *eth_da)
926 const struct mlx5_switch_info *switch_info = &spawn->info;
927 struct mlx5_dev_ctx_shared *sh = NULL;
928 struct ibv_port_attr port_attr;
929 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
930 struct rte_eth_dev *eth_dev = NULL;
931 struct mlx5_priv *priv = NULL;
933 unsigned int hw_padding = 0;
935 unsigned int tunnel_en = 0;
936 unsigned int mpls_en = 0;
937 unsigned int swp = 0;
938 unsigned int mprq = 0;
939 unsigned int mprq_min_stride_size_n = 0;
940 unsigned int mprq_max_stride_size_n = 0;
941 unsigned int mprq_min_stride_num_n = 0;
942 unsigned int mprq_max_stride_num_n = 0;
943 struct rte_ether_addr mac;
944 char name[RTE_ETH_NAME_MAX_LEN];
945 int own_domain_id = 0;
947 struct mlx5_port_info vport_info = { .query_flags = 0 };
950 /* Determine if this port representor is supposed to be spawned. */
951 if (switch_info->representor && dpdk_dev->devargs &&
952 !mlx5_representor_match(spawn, eth_da))
954 /* Build device name. */
955 if (spawn->pf_bond < 0) {
957 if (!switch_info->representor)
958 strlcpy(name, dpdk_dev->name, sizeof(name));
960 err = snprintf(name, sizeof(name), "%s_representor_%s%u",
962 switch_info->name_type ==
963 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
964 switch_info->port_name);
966 /* Bonding device. */
967 if (!switch_info->representor) {
968 err = snprintf(name, sizeof(name), "%s_%s",
970 mlx5_os_get_dev_device_name(spawn->phys_dev));
972 err = snprintf(name, sizeof(name), "%s_%s_representor_c%dpf%d%s%u",
974 mlx5_os_get_dev_device_name(spawn->phys_dev),
975 switch_info->ctrl_num,
977 switch_info->name_type ==
978 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
979 switch_info->port_name);
982 if (err >= (int)sizeof(name))
983 DRV_LOG(WARNING, "device name overflow %s", name);
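/*
 * Illustrative examples (assumption, derived from the format strings above):
 * a VF representor 2 on a non-bonding device would be named
 * "<pci_bdf>_representor_vf2", while on a bonding device a name such as
 * "<pci_bdf>_<ibdev>_representor_c0pf0vf2" would be produced.
 */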
984 /* Check whether the device is already spawned. */
985 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
989 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
990 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
991 struct mlx5_mp_id mp_id;
993 eth_dev = rte_eth_dev_attach_secondary(name);
994 if (eth_dev == NULL) {
995 DRV_LOG(ERR, "can not attach rte ethdev");
999 eth_dev->device = dpdk_dev;
1000 eth_dev->dev_ops = &mlx5_dev_sec_ops;
1001 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1002 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1003 err = mlx5_proc_priv_init(eth_dev);
1006 mp_id.port_id = eth_dev->data->port_id;
1007 strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
1008 /* Receive the command fd from the primary process. */
1009 err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
1012 /* Remap UAR for Tx queues. */
1013 err = mlx5_tx_uar_init_secondary(eth_dev, err);
1017 * Ethdev pointer is still required as input since
1018 * the primary device is not accessible from the
1019 * secondary process.
1021 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
1022 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
1025 mlx5_dev_close(eth_dev);
1029 * Some parameters ("tx_db_nc" in particular) are needed in
1030 * advance to create the dv/verbs device context. We process the
1031 * devargs here to get them, and later process the devargs again
1032 * to override some hardware settings.
1034 err = mlx5_args(config, dpdk_dev->devargs);
1037 DRV_LOG(ERR, "failed to process device arguments: %s",
1038 strerror(rte_errno));
1041 if (config->dv_miss_info) {
1042 if (switch_info->master || switch_info->representor)
1043 config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
1045 mlx5_malloc_mem_select(config->sys_mem_en);
1046 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
1049 config->devx = sh->devx;
1050 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
1051 config->dest_tir = 1;
1053 #ifdef HAVE_IBV_MLX5_MOD_SWP
1054 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
1057 * Multi-packet send is supported by ConnectX-4 Lx PF as well
1058 * as all ConnectX-5 devices.
1060 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1061 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
1063 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1064 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
1066 mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
1067 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
1068 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
1069 DRV_LOG(DEBUG, "enhanced MPW is supported");
1070 mps = MLX5_MPW_ENHANCED;
1072 DRV_LOG(DEBUG, "MPW is supported");
1076 DRV_LOG(DEBUG, "MPW isn't supported");
1077 mps = MLX5_MPW_DISABLED;
1079 #ifdef HAVE_IBV_MLX5_MOD_SWP
1080 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
1081 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
1082 DRV_LOG(DEBUG, "SWP support: %u", swp);
1084 config->swp = !!swp;
1085 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1086 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
1087 struct mlx5dv_striding_rq_caps mprq_caps =
1088 dv_attr.striding_rq_caps;
1090 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
1091 mprq_caps.min_single_stride_log_num_of_bytes);
1092 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
1093 mprq_caps.max_single_stride_log_num_of_bytes);
1094 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
1095 mprq_caps.min_single_wqe_log_num_of_strides);
1096 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
1097 mprq_caps.max_single_wqe_log_num_of_strides);
1098 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
1099 mprq_caps.supported_qpts);
1100 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
1102 mprq_min_stride_size_n =
1103 mprq_caps.min_single_stride_log_num_of_bytes;
1104 mprq_max_stride_size_n =
1105 mprq_caps.max_single_stride_log_num_of_bytes;
1106 mprq_min_stride_num_n =
1107 mprq_caps.min_single_wqe_log_num_of_strides;
1108 mprq_max_stride_num_n =
1109 mprq_caps.max_single_wqe_log_num_of_strides;
1112 /* Rx CQE compression is enabled by default. */
1113 config->cqe_comp = 1;
1114 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1115 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
1116 tunnel_en = ((dv_attr.tunnel_offloads_caps &
1117 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
1118 (dv_attr.tunnel_offloads_caps &
1119 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
1120 (dv_attr.tunnel_offloads_caps &
1121 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
1123 DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
1124 tunnel_en ? "" : "not ");
1127 "tunnel offloading disabled due to old OFED/rdma-core version");
1129 config->tunnel_en = tunnel_en;
1130 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1131 mpls_en = ((dv_attr.tunnel_offloads_caps &
1132 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
1133 (dv_attr.tunnel_offloads_caps &
1134 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
1135 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
1136 mpls_en ? "" : "not ");
1138 DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
1139 " old OFED/rdma-core version or firmware configuration");
1141 config->mpls_en = mpls_en;
1142 /* Check port status. */
1143 err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
1145 DRV_LOG(ERR, "port query failed: %s", strerror(err));
1148 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1149 DRV_LOG(ERR, "port is not configured in Ethernet mode");
1153 if (port_attr.state != IBV_PORT_ACTIVE)
1154 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
1155 mlx5_glue->port_state_str(port_attr.state),
1157 /* Allocate private eth device data. */
1158 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1160 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1162 DRV_LOG(ERR, "priv allocation failure");
1167 priv->dev_port = spawn->phys_port;
1168 priv->pci_dev = spawn->pci_dev;
1169 priv->mtu = RTE_ETHER_MTU;
1170 /* Some internal functions rely on Netlink sockets, open them now. */
1171 priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
1172 priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
1173 priv->representor = !!switch_info->representor;
1174 priv->master = !!switch_info->master;
1175 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1176 priv->vport_meta_tag = 0;
1177 priv->vport_meta_mask = 0;
1178 priv->pf_bond = spawn->pf_bond;
1180 * If we have E-Switch, we should determine the vport attributes.
1181 * E-Switch may use either the source vport field or the reg_c[0]
1182 * metadata register to match on the vport index. The engaged part
1183 * of the metadata register is defined by the mask.
1185 if (switch_info->representor || switch_info->master) {
1186 err = mlx5_glue->devx_port_query(sh->ctx,
1191 "can't query devx port %d on device %s",
1193 mlx5_os_get_dev_device_name(spawn->phys_dev));
1194 vport_info.query_flags = 0;
1197 if (vport_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1198 priv->vport_meta_tag = vport_info.vport_meta_tag;
1199 priv->vport_meta_mask = vport_info.vport_meta_mask;
1200 if (!priv->vport_meta_mask) {
1201 DRV_LOG(ERR, "vport zero mask for port %d"
1202 " on bonding device %s",
1204 mlx5_os_get_dev_device_name
1209 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1210 DRV_LOG(ERR, "invalid vport tag for port %d"
1211 " on bonding device %s",
1213 mlx5_os_get_dev_device_name
1219 if (vport_info.query_flags & MLX5_PORT_QUERY_VPORT) {
1220 priv->vport_id = vport_info.vport_id;
1221 } else if (spawn->pf_bond >= 0 &&
1222 (switch_info->representor || switch_info->master)) {
1223 DRV_LOG(ERR, "can't deduce vport index for port %d"
1224 " on bonding device %s",
1226 mlx5_os_get_dev_device_name(spawn->phys_dev));
1231 * Assume the vport index in a compatible way. Kernel/rdma_core
1232 * supports single E-Switch per PF configurations only, and the
1233 * vport_id field contains the vport index for the associated VF,
1234 * which is deduced from the representor port name.
1235 * For example, take IB device port 10 with an attached network
1236 * device eth0 whose port name attribute is pf0vf2: we can deduce
1237 * the VF number as 2 and set the vport index to 3 (2 + 1).
1238 * This assignment scheme should be changed if multiple E-Switch
1239 * instances per PF configurations and/or PCI subfunctions
1240 * are added.
1242 priv->vport_id = switch_info->representor ?
1243 switch_info->port_name + 1 : -1;
1245 priv->representor_id = mlx5_representor_id_encode(switch_info,
1248 * Look for sibling devices in order to reuse their switch domain
1249 * if any, otherwise allocate one.
1251 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
1252 const struct mlx5_priv *opriv =
1253 rte_eth_devices[port_id].data->dev_private;
1256 opriv->sh != priv->sh ||
1258 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1260 priv->domain_id = opriv->domain_id;
1263 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1264 err = rte_eth_switch_domain_alloc(&priv->domain_id);
1267 DRV_LOG(ERR, "unable to allocate switch domain: %s",
1268 strerror(rte_errno));
1273 /* Override some values set by hardware configuration. */
1274 mlx5_args(config, dpdk_dev->devargs);
1275 err = mlx5_dev_check_sibling_config(priv, config);
1278 config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
1279 IBV_DEVICE_RAW_IP_CSUM);
1280 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
1281 (config->hw_csum ? "" : "not "));
1282 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
1283 !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1284 DRV_LOG(DEBUG, "counters are not supported");
1286 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
1287 if (config->dv_flow_en) {
1288 DRV_LOG(WARNING, "DV flow is not supported");
1289 config->dv_flow_en = 0;
1292 config->ind_table_max_size =
1293 sh->device_attr.max_rwq_indirection_table_size;
1295 * Remove this check once DPDK supports larger/variable
1296 * indirection tables.
1298 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
1299 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
1300 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
1301 config->ind_table_max_size);
1302 config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
1303 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
1304 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
1305 (config->hw_vlan_strip ? "" : "not "));
1306 config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
1307 IBV_RAW_PACKET_CAP_SCATTER_FCS);
1308 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1309 hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
1310 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1311 hw_padding = !!(sh->device_attr.device_cap_flags_ex &
1312 IBV_DEVICE_PCI_WRITE_END_PADDING);
1314 if (config->hw_padding && !hw_padding) {
1315 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
1316 config->hw_padding = 0;
1317 } else if (config->hw_padding) {
1318 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
1320 config->tso = (sh->device_attr.max_tso > 0 &&
1321 (sh->device_attr.tso_supported_qpts &
1322 (1 << IBV_QPT_RAW_PACKET)));
1324 config->tso_max_payload_sz = sh->device_attr.max_tso;
1326 * MPW is disabled by default, while the Enhanced MPW is enabled
1329 if (config->mps == MLX5_ARG_UNSET)
1330 config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
1333 config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
1334 DRV_LOG(INFO, "%sMPS is %s",
1335 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
1336 config->mps == MLX5_MPW ? "legacy " : "",
1337 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
1339 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
1344 /* Check relaxed ordering support. */
1345 if (!haswell_broadwell_cpu) {
1346 sh->cmng.relaxed_ordering_write =
1347 config->hca_attr.relaxed_ordering_write;
1348 sh->cmng.relaxed_ordering_read =
1349 config->hca_attr.relaxed_ordering_read;
1351 sh->cmng.relaxed_ordering_read = 0;
1352 sh->cmng.relaxed_ordering_write = 0;
1354 sh->rq_ts_format = config->hca_attr.rq_ts_format;
1355 sh->sq_ts_format = config->hca_attr.sq_ts_format;
1356 sh->qp_ts_format = config->hca_attr.qp_ts_format;
1357 /* Check for LRO support. */
1358 if (config->dest_tir && config->hca_attr.lro_cap &&
1359 config->dv_flow_en) {
1360 /* TBD check tunnel lro caps. */
1361 config->lro.supported = config->hca_attr.lro_cap;
1362 DRV_LOG(DEBUG, "Device supports LRO");
1364 * If LRO timeout is not configured by application,
1365 * use the minimal supported value.
1367 if (!config->lro.timeout)
1368 config->lro.timeout =
1369 config->hca_attr.lro_timer_supported_periods[0];
1370 DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1371 config->lro.timeout);
1372 DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
1373 "required for coalescing is %d bytes",
1374 config->hca_attr.lro_min_mss_size);
1376 #if defined(HAVE_MLX5DV_DR) && \
1377 (defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
1378 defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
1379 if (config->hca_attr.qos.sup &&
1380 config->hca_attr.qos.flow_meter_old &&
1381 config->dv_flow_en) {
1382 uint8_t reg_c_mask =
1383 config->hca_attr.qos.flow_meter_reg_c_ids;
1385 * Meter needs two REG_C's for color match and pre-sfx
1386 * flow match. Here get the REG_C for color match.
1387 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
1390 if (__builtin_popcount(reg_c_mask) < 1) {
1392 DRV_LOG(WARNING, "No available register for"
1396 * The meter color register is used by the
1397 * flow-hit feature as well.
1398 * The flow-hit feature must use REG_C_3.
1399 * Prefer REG_C_3 if it is available.
1401 if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
1402 priv->mtr_color_reg = REG_C_3;
1404 priv->mtr_color_reg = ffs(reg_c_mask)
1407 priv->mtr_reg_share =
1408 config->hca_attr.qos.flow_meter;
1409 DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
1410 priv->mtr_color_reg);
1413 if (config->hca_attr.qos.sup &&
1414 config->hca_attr.qos.flow_meter_aso_sup) {
1415 uint32_t log_obj_size =
1416 rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
1418 config->hca_attr.qos.log_meter_aso_granularity &&
1420 config->hca_attr.qos.log_meter_aso_max_alloc)
1421 sh->meter_aso_en = 1;
1424 err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
1430 if (config->hca_attr.flow.tunnel_header_0_1)
1431 sh->tunnel_header_0_1 = 1;
1433 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
1434 if (config->hca_attr.flow_hit_aso &&
1435 priv->mtr_color_reg == REG_C_3) {
1436 sh->flow_hit_aso_en = 1;
1437 err = mlx5_flow_aso_age_mng_init(sh);
1442 DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
1444 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1445 #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
1446 defined(HAVE_MLX5_DR_ACTION_ASO_CT)
1447 if (config->hca_attr.ct_offload &&
1448 priv->mtr_color_reg == REG_C_3) {
1449 err = mlx5_flow_aso_ct_mng_init(sh);
1454 DRV_LOG(DEBUG, "CT ASO is supported.");
1457 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
1458 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1459 if (config->hca_attr.log_max_ft_sampler_num > 0 &&
1460 config->dv_flow_en) {
1461 priv->sampler_en = 1;
1462 DRV_LOG(DEBUG, "Sampler enabled!");
1464 priv->sampler_en = 0;
1465 if (!config->hca_attr.log_max_ft_sampler_num)
1467 "No available register for sampler.");
1469 DRV_LOG(DEBUG, "DV flow is not supported!");
1473 if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
1474 !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
1475 DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
1476 config->cqe_comp = 0;
1478 if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
1479 (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
1480 DRV_LOG(WARNING, "Flow Tag CQE compression"
1481 " format isn't supported.");
1482 config->cqe_comp = 0;
1484 if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
1485 (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
1486 DRV_LOG(WARNING, "L3/L4 Header CQE compression"
1487 " format isn't supported.");
1488 config->cqe_comp = 0;
1490 DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
1491 config->cqe_comp ? "" : "not ");
1492 if (config->tx_pp) {
1493 DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
1494 config->hca_attr.dev_freq_khz);
1495 DRV_LOG(DEBUG, "Packet pacing is %ssupported",
1496 config->hca_attr.qos.packet_pacing ? "" : "not ");
1497 DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
1498 config->hca_attr.cross_channel ? "" : "not ");
1499 DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1500 config->hca_attr.wqe_index_ignore ? "" : "not ");
1501 DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1502 config->hca_attr.non_wire_sq ? "" : "not ");
1503 DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1504 config->hca_attr.log_max_static_sq_wq ? "" : "not ",
1505 config->hca_attr.log_max_static_sq_wq);
1506 DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1507 config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
1508 if (!config->devx) {
1509 DRV_LOG(ERR, "DevX is required for packet pacing");
1513 if (!config->hca_attr.qos.packet_pacing) {
1514 DRV_LOG(ERR, "Packet pacing is not supported");
1518 if (!config->hca_attr.cross_channel) {
1519 DRV_LOG(ERR, "Cross channel operations are"
1520 " required for packet pacing");
1524 if (!config->hca_attr.wqe_index_ignore) {
1525 DRV_LOG(ERR, "WQE index ignore feature is"
1526 " required for packet pacing");
1530 if (!config->hca_attr.non_wire_sq) {
1531 DRV_LOG(ERR, "Non-wire SQ feature is"
1532 " required for packet pacing");
1536 if (!config->hca_attr.log_max_static_sq_wq) {
1537 DRV_LOG(ERR, "Static WQE SQ feature is"
1538 " required for packet pacing");
1542 if (!config->hca_attr.qos.wqe_rate_pp) {
1543 DRV_LOG(ERR, "WQE rate mode is required"
1544 " for packet pacing");
1548 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1549 DRV_LOG(ERR, "DevX does not provide UAR offset,"
1550 " can't create queues for packet pacing");
1556 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
1558 err = config->hca_attr.access_register_user ?
1559 mlx5_devx_cmd_register_read
1560 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
1561 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
1565 /* MTUTC register is read successfully. */
1566 ts_mode = MLX5_GET(register_mtutc, reg,
1568 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
1569 config->rt_timestamp = 1;
1571 /* Kernel does not support register reading. */
1572 if (config->hca_attr.dev_freq_khz ==
1573 (NS_PER_S / MS_PER_S))
1574 config->rt_timestamp = 1;
1578 * If the HW has a bug working with tunnel packet decapsulation and
1579 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
1580 * bit. Then the DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1582 if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
1583 config->hw_fcs_strip = 0;
1584 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1585 (config->hw_fcs_strip ? "" : "not "));
1586 if (config->mprq.enabled && mprq) {
1587 if (config->mprq.stride_num_n &&
1588 (config->mprq.stride_num_n > mprq_max_stride_num_n ||
1589 config->mprq.stride_num_n < mprq_min_stride_num_n)) {
1590 config->mprq.stride_num_n =
1591 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1592 mprq_min_stride_num_n),
1593 mprq_max_stride_num_n);
1595 "the number of strides"
1596 " for Multi-Packet RQ is out of range,"
1597 " setting default value (%u)",
1598 1 << config->mprq.stride_num_n);
1600 if (config->mprq.stride_size_n &&
1601 (config->mprq.stride_size_n > mprq_max_stride_size_n ||
1602 config->mprq.stride_size_n < mprq_min_stride_size_n)) {
1603 config->mprq.stride_size_n =
1604 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
1605 mprq_min_stride_size_n),
1606 mprq_max_stride_size_n);
1608 "the size of a stride"
1609 " for Multi-Packet RQ is out of range,"
1610 " setting default value (%u)",
1611 1 << config->mprq.stride_size_n);
1613 config->mprq.min_stride_size_n = mprq_min_stride_size_n;
1614 config->mprq.max_stride_size_n = mprq_max_stride_size_n;
1615 } else if (config->mprq.enabled && !mprq) {
1616 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
1617 config->mprq.enabled = 0;
1619 if (config->max_dump_files_num == 0)
1620 config->max_dump_files_num = 128;
1621 eth_dev = rte_eth_dev_allocate(name);
1622 if (eth_dev == NULL) {
1623 DRV_LOG(ERR, "can not allocate rte ethdev");
1627 if (priv->representor) {
1628 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1629 eth_dev->data->representor_id = priv->representor_id;
1631 priv->mp_id.port_id = eth_dev->data->port_id;
1632 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
1634 * Store the associated network device interface index. This index
1635 * is permanent throughout the lifetime of the device, so we may
1636 * store the ifindex here and use the cached value later.
1638 MLX5_ASSERT(spawn->ifindex);
1639 priv->if_index = spawn->ifindex;
1640 eth_dev->data->dev_private = priv;
1641 priv->dev_data = eth_dev->data;
1642 eth_dev->data->mac_addrs = priv->mac;
1643 eth_dev->device = dpdk_dev;
1644 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1645 /* Configure the first MAC address by default. */
1646 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1648 "port %u cannot get MAC address, is mlx5_en"
1649 " loaded? (errno: %s)",
1650 eth_dev->data->port_id, strerror(rte_errno));
1655 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1656 eth_dev->data->port_id,
1657 mac.addr_bytes[0], mac.addr_bytes[1],
1658 mac.addr_bytes[2], mac.addr_bytes[3],
1659 mac.addr_bytes[4], mac.addr_bytes[5]);
1660 #ifdef RTE_LIBRTE_MLX5_DEBUG
1662 char ifname[MLX5_NAMESIZE];
1664 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1665 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1666 eth_dev->data->port_id, ifname);
1668 DRV_LOG(DEBUG, "port %u ifname is unknown",
1669 eth_dev->data->port_id);
1672 /* Get actual MTU if possible. */
1673 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1678 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1680 /* Initialize burst functions to prevent crashes before link-up. */
1681 eth_dev->rx_pkt_burst = removed_rx_burst;
1682 eth_dev->tx_pkt_burst = removed_tx_burst;
1683 eth_dev->dev_ops = &mlx5_dev_ops;
1684 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1685 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1686 eth_dev->rx_queue_count = mlx5_rx_queue_count;
1687 /* Register MAC address. */
1688 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1689 if (config->vf && config->vf_nl_en)
1690 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1691 mlx5_ifindex(eth_dev),
1692 eth_dev->data->mac_addrs,
1693 MLX5_MAX_MAC_ADDRESSES);
1694 priv->ctrl_flows = 0;
1695 rte_spinlock_init(&priv->flow_list_lock);
1696 TAILQ_INIT(&priv->flow_meters);
1697 priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
1698 if (!priv->mtr_profile_tbl)
1700 /* Hint libmlx5 to use the PMD allocator for data plane resources. */
1701 mlx5_glue->dv_set_context_attr(sh->ctx,
1702 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1703 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
1704 .alloc = &mlx5_alloc_verbs_buf,
1705 .free = &mlx5_free_verbs_buf,
1708 /* Bring Ethernet device up. */
1709 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1710 eth_dev->data->port_id);
1711 mlx5_set_link_up(eth_dev);
1713 * Even though the interrupt handler is not installed yet,
1714 * interrupts will still trigger on the async_fd from
1715 * the Verbs context returned by ibv_open_device().
1717 mlx5_link_update(eth_dev, 0);
1718 #ifdef HAVE_MLX5DV_DR_ESWITCH
1719 if (!(config->hca_attr.eswitch_manager && config->dv_flow_en &&
1720 (switch_info->representor || switch_info->master)))
1721 config->dv_esw_en = 0;
1723 config->dv_esw_en = 0;
1725 /* Detect minimal data bytes to inline. */
1726 mlx5_set_min_inline(spawn, config);
1727 /* Store device configuration on private structure. */
1728 priv->config = *config;
1729 for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
1730 icfg[i].release_mem_en = !!config->reclaim_mode;
1731 if (config->reclaim_mode)
1732 icfg[i].per_core_cache = 0;
1733 priv->flows[i] = mlx5_ipool_create(&icfg[i]);
1734 if (!priv->flows[i])
1737 /* Create context for virtual machine VLAN workaround. */
1738 priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1739 if (config->dv_flow_en) {
1740 err = mlx5_alloc_shared_dr(priv);
1744 if (config->devx && config->dv_flow_en && config->dest_tir) {
1745 priv->obj_ops = devx_obj_ops;
1746 priv->obj_ops.drop_action_create =
1747 ibv_obj_ops.drop_action_create;
1748 priv->obj_ops.drop_action_destroy =
1749 ibv_obj_ops.drop_action_destroy;
1750 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1751 priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
1753 if (config->dv_esw_en)
1754 priv->obj_ops.txq_obj_modify =
1755 ibv_obj_ops.txq_obj_modify;
1757 /* Use specific wrappers for Tx object. */
1758 priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
1759 priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
1760 mlx5_queue_counter_id_prepare(eth_dev);
1761 priv->obj_ops.lb_dummy_queue_create =
1762 mlx5_rxq_ibv_obj_dummy_lb_create;
1763 priv->obj_ops.lb_dummy_queue_release =
1764 mlx5_rxq_ibv_obj_dummy_lb_release;
1766 priv->obj_ops = ibv_obj_ops;
1768 priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
1769 if (!priv->drop_queue.hrxq)
1771 /* Detect the number of supported Verbs flow priorities. */
1772 err = mlx5_flow_discover_priorities(eth_dev);
1777 priv->config.flow_prio = err;
1778 if (!priv->config.dv_esw_en &&
1779 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1780 DRV_LOG(WARNING, "metadata mode %u is not supported "
1781 "(no E-Switch)", priv->config.dv_xmeta_en);
1782 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1784 mlx5_set_metadata_mask(eth_dev);
1785 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1786 !priv->sh->dv_regc0_mask) {
1787 DRV_LOG(ERR, "metadata mode %u is not supported "
1788 "(no metadata reg_c[0] is available)",
1789 priv->config.dv_xmeta_en);
1793 mlx5_list_create(&priv->hrxqs, "hrxq", 0, eth_dev,
1794 mlx5_hrxq_create_cb,
1796 mlx5_hrxq_remove_cb);
1797 /* Query availability of metadata reg_c's. */
1798 err = mlx5_flow_discover_mreg_c(eth_dev);
1803 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1805 "port %u extensive metadata register is not supported",
1806 eth_dev->data->port_id);
1807 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1808 DRV_LOG(ERR, "metadata mode %u is not supported "
1809 "(no metadata registers available)",
1810 priv->config.dv_xmeta_en);
1815 if (priv->config.dv_flow_en &&
1816 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1817 mlx5_flow_ext_mreg_supported(eth_dev) &&
1818 priv->sh->dv_regc0_mask) {
1819 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1820 MLX5_FLOW_MREG_HTABLE_SZ,
1822 flow_dv_mreg_create_cb,
1823 flow_dv_mreg_match_cb,
1824 flow_dv_mreg_remove_cb);
1825 if (!priv->mreg_cp_tbl) {
1829 priv->mreg_cp_tbl->ctx = eth_dev;
1831 rte_spinlock_init(&priv->shared_act_sl);
1832 mlx5_flow_counter_mode_config(eth_dev);
1833 if (priv->config.dv_flow_en)
1834 eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1838 if (priv->mreg_cp_tbl)
1839 mlx5_hlist_destroy(priv->mreg_cp_tbl);
1841 mlx5_os_free_shared_dr(priv);
1842 if (priv->nl_socket_route >= 0)
1843 close(priv->nl_socket_route);
1844 if (priv->nl_socket_rdma >= 0)
1845 close(priv->nl_socket_rdma);
1846 if (priv->vmwa_context)
1847 mlx5_vlan_vmwa_exit(priv->vmwa_context);
1848 if (eth_dev && priv->drop_queue.hrxq)
1849 mlx5_drop_action_destroy(eth_dev);
1850 if (priv->mtr_profile_tbl)
1851 mlx5_l3t_destroy(priv->mtr_profile_tbl);
1853 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1854 mlx5_list_destroy(&priv->hrxqs);
1856 if (eth_dev != NULL)
1857 eth_dev->data->dev_private = NULL;
1859 if (eth_dev != NULL) {
1860 /* mac_addrs must not be freed alone because it is part of dev_private. */
1863 eth_dev->data->mac_addrs = NULL;
1864 rte_eth_dev_release_port(eth_dev);
1867 mlx5_free_shared_dev_ctx(sh);
1868 MLX5_ASSERT(err > 0);
1874 * Comparison callback to sort device data.
1876 * This is meant to be used with qsort().
1879 * Pointer to pointer to first data object.
1881 * Pointer to pointer to second data object.
1884 * 0 if both objects are equal, less than 0 if the first argument is less
1885 * than the second, greater than 0 otherwise.
1888 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1890 const struct mlx5_switch_info *si_a =
1891 &((const struct mlx5_dev_spawn_data *)a)->info;
1892 const struct mlx5_switch_info *si_b =
1893 &((const struct mlx5_dev_spawn_data *)b)->info;
1896 /* Master device first. */
1897 ret = si_b->master - si_a->master;
1900 /* Then representor devices. */
1901 ret = si_b->representor - si_a->representor;
1904 /* Unidentified devices come last in no specific order. */
1905 if (!si_a->representor)
1907 /* Order representors by name. */
1908 return si_a->port_name - si_b->port_name;
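/*
 * Usage sketch (assumption, the call site is not shown in this excerpt): the
 * spawn list is typically sorted before spawning, e.g.
 *
 *	qsort(list, ns, sizeof(list[0]), mlx5_dev_spawn_data_cmp);
 *
 * so the master port comes first, then representors ordered by port name.
 */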
1912 * Match PCI information for possible slaves of bonding device.
1914 * @param[in] ibv_dev
1915 * Pointer to Infiniband device structure.
1916 * @param[in] pci_dev
1917 * Pointer to primary PCI address structure to match.
1918 * @param[in] nl_rdma
1919 * Netlink RDMA group socket handle.
1921 * Representor owner PF index.
1922 * @param[out] bond_info
1923 * Pointer to bonding information.
1926 * Negative value if no bonding device is found, otherwise the
1927 * positive index of the slave PF in the bonding.
1930 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1931 const struct rte_pci_addr *pci_dev,
1932 int nl_rdma, uint16_t owner,
1933 struct mlx5_bond_info *bond_info)
1935 char ifname[IF_NAMESIZE + 1];
1936 unsigned int ifindex;
1938 FILE *bond_file = NULL, *file;
1943 * Try to get master device name. If something goes
1944 * wrong, suppose the lack of kernel support and no bonding devices.
1947 memset(bond_info, 0, sizeof(*bond_info));
1950 if (!strstr(ibv_dev->name, "bond"))
1952 np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
1956 * The master device might not be on the predefined
1957 * port (not on port index 1, this is not guaranteed);
1958 * we have to scan all Infiniband device ports and
1961 for (i = 1; i <= np; ++i) {
1962 /* Check whether Infiniband port is populated. */
1963 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
1966 if (!if_indextoname(ifindex, ifname))
1968 /* Try to read bonding slave names from sysfs. */
1970 "/sys/class/net/%s/master/bonding/slaves", ifname);
1971 bond_file = fopen(slaves, "r");
1977 /* Use safe format to check maximal buffer length. */
1978 MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1979 while (fscanf(bond_file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1980 char tmp_str[IF_NAMESIZE + 32];
1981 struct rte_pci_addr pci_addr;
1982 struct mlx5_switch_info info;
1984 /* Process slave interface names in the loop. */
1985 snprintf(tmp_str, sizeof(tmp_str),
1986 "/sys/class/net/%s", ifname);
1987 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
1988 DRV_LOG(WARNING, "can not get PCI address"
1989 " for netdev \"%s\"", ifname);
1992 /* Slave interface PCI address match found. */
1993 snprintf(tmp_str, sizeof(tmp_str),
1994 "/sys/class/net/%s/phys_port_name", ifname);
1995 file = fopen(tmp_str, "rb");
1998 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1999 if (fscanf(file, "%32s", tmp_str) == 1)
2000 mlx5_translate_port_name(tmp_str, &info);
2002 /* Only process PF ports. */
2003 if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_LEGACY &&
2004 info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2006 /* Check the maximum number of bonding members. */
2007 if (info.port_name >= MLX5_BOND_MAX_PORTS) {
2008 DRV_LOG(WARNING, "bonding index out of range, "
2009 "please increase MLX5_BOND_MAX_PORTS: %s",
2013 /* Match PCI address, allows BDF0+pfx or BDFx+pfx. */
2014 if (pci_dev->domain == pci_addr.domain &&
2015 pci_dev->bus == pci_addr.bus &&
2016 pci_dev->devid == pci_addr.devid &&
2017 ((pci_dev->function == 0 &&
2018 pci_dev->function + owner == pci_addr.function) ||
2019 (pci_dev->function == owner &&
2020 pci_addr.function == owner)))
2021 pf = info.port_name;
2023 snprintf(tmp_str, sizeof(tmp_str),
2024 "/sys/class/net/%s/ifindex", ifname);
2025 file = fopen(tmp_str, "rb");
2028 ret = fscanf(file, "%u", &ifindex);
2032 /* Save bonding info. */
2033 strncpy(bond_info->ports[info.port_name].ifname, ifname,
2034 sizeof(bond_info->ports[0].ifname));
2035 bond_info->ports[info.port_name].pci_addr = pci_addr;
2036 bond_info->ports[info.port_name].ifindex = ifindex;
2037 bond_info->n_port++;
2040 /* Get bond interface info */
2041 ret = mlx5_sysfs_bond_info(ifindex, &bond_info->ifindex,
2044 DRV_LOG(ERR, "unable to get bond info: %s",
2045 strerror(rte_errno));
2047 DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
2048 ifindex, bond_info->ifindex, bond_info->ifname);
2054 * Register a PCI device within bonding.
2056 * This function spawns Ethernet devices out of a given PCI device and
2057 * bonding owner PF index.
2059 * @param[in] pci_dev
2060 * PCI device information.
2061 * @param[in] req_eth_da
2062 * Requested ethdev device argument.
2063 * @param[in] owner_id
2064 * Requested owner PF port ID within bonding device, defaults to 0.
2067 * 0 on success, a negative errno value otherwise and rte_errno is set.
2070 mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
2071 struct rte_eth_devargs *req_eth_da,
2074 struct ibv_device **ibv_list;
2076 * Number of found IB devices matching the requested PCI BDF.
2077 * nd != 1 means there are multiple IB devices over the same
2078 * PCI device and we have representors and master.
2080 unsigned int nd = 0;
2082 * Number of found IB device ports. nd = 1 and np = 1..n means
2083 * we have the single multiport IB device, and there may be
2084 * representors attached to some of the found ports.
2086 unsigned int np = 0;
2088 * Number of DPDK ethernet devices to spawn - either over
2089 * multiple IB devices or multiple ports of a single IB device.
2090 * Actually this is the number of iterations to spawn.
2092 unsigned int ns = 0;
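/*
 * Illustration of the counters above: a single dual-port IB device
 * typically yields nd == 1 and np == 2 with up to np spawn iterations
 * (ns), while representors exposed as separate IB devices yield
 * nd > 1 with np left at 0.
 */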
2095 * < 0 - no bonding device (single one)
2096 * >= 0 - bonding device (value is slave PF index)
2099 struct mlx5_dev_spawn_data *list = NULL;
2100 struct mlx5_dev_config dev_config;
2101 unsigned int dev_config_vf;
2102 struct rte_eth_devargs eth_da = *req_eth_da;
2103 struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
2104 struct mlx5_bond_info bond_info;
2107 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2108 mlx5_pmd_socket_init();
2109 ret = mlx5_init_once();
2111 DRV_LOG(ERR, "unable to init PMD global data: %s",
2112 strerror(rte_errno));
2116 ibv_list = mlx5_glue->get_device_list(&ret);
2118 rte_errno = errno ? errno : ENOSYS;
2119 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
2123 * First scan the list of all Infiniband devices to find
2124 * matching ones, gathering into the list.
2126 struct ibv_device *ibv_match[ret + 1];
2127 int nl_route = mlx5_nl_init(NETLINK_ROUTE);
2128 int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
2132 struct rte_pci_addr pci_addr;
2134 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
2135 bd = mlx5_device_bond_pci_match
2136 (ibv_list[ret], &owner_pci, nl_rdma, owner_id,
2140 * Bonding device detected. Only one match is allowed,
2141 * the bonding is supported over a multi-port IB device,
2142 * there should be no matches on representor PCI
2143 * functions or non-VF-LAG bonding devices with the
2144 * specified address.
2148 "multiple PCI match on bonding device"
2149 "\"%s\" found", ibv_list[ret]->name);
2154 /* Amend owner pci address if owner PF ID specified. */
2155 if (eth_da.nb_representor_ports)
2156 owner_pci.function += owner_id;
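/*
 * Example with a hypothetical address: probing 0000:08:00.0 with
 * owner_id == 1 and a representor devargs present turns owner_pci
 * into 0000:08:00.1, so later PCI comparisons target that PF.
 */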
2157 DRV_LOG(INFO, "PCI information matches for"
2158 " slave %d bonding device \"%s\"",
2159 bd, ibv_list[ret]->name);
2160 ibv_match[nd++] = ibv_list[ret];
2163 /* Bonding device not found. */
2164 if (mlx5_dev_to_pci_addr
2165 (ibv_list[ret]->ibdev_path, &pci_addr))
2167 if (owner_pci.domain != pci_addr.domain ||
2168 owner_pci.bus != pci_addr.bus ||
2169 owner_pci.devid != pci_addr.devid ||
2170 owner_pci.function != pci_addr.function)
2172 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
2173 ibv_list[ret]->name);
2174 ibv_match[nd++] = ibv_list[ret];
2177 ibv_match[nd] = NULL;
2179 /* No device matches, just complain and bail out. */
2181 "no Verbs device matches PCI device " PCI_PRI_FMT ","
2182 " are kernel drivers loaded?",
2183 owner_pci.domain, owner_pci.bus,
2184 owner_pci.devid, owner_pci.function);
2191 * The single matching device found may have multiple ports.
2192 * Each port may be a representor, so we have to check the port
2193 * number and the existence of representors.
2196 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2198 DRV_LOG(WARNING, "can not get IB device \"%s\""
2199 " ports number", ibv_match[0]->name);
2200 if (bd >= 0 && !np) {
2201 DRV_LOG(ERR, "can not get ports"
2202 " for bonding device");
2208 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
2211 * This may happen if there is VF LAG kernel support and
2212 * the application is compiled with an older rdma-core library.
2215 "No kernel/verbs support for VF LAG bonding found.");
2216 rte_errno = ENOTSUP;
2222 * Now we can determine the maximal
2223 * number of devices to be spawned.
2225 list = mlx5_malloc(MLX5_MEM_ZERO,
2226 sizeof(struct mlx5_dev_spawn_data) *
2228 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
2230 DRV_LOG(ERR, "spawn data array allocation failure");
2235 if (bd >= 0 || np > 1) {
2237 * Single IB device with multiple ports found,
2238 * it may be an E-Switch master device and its representors.
2239 * We have to perform identification through the ports.
2241 MLX5_ASSERT(nl_rdma >= 0);
2242 MLX5_ASSERT(ns == 0);
2243 MLX5_ASSERT(nd == 1);
2245 for (i = 1; i <= np; ++i) {
2246 list[ns].bond_info = &bond_info;
2247 list[ns].max_port = np;
2248 list[ns].phys_port = i;
2249 list[ns].phys_dev = ibv_match[0];
2250 list[ns].eth_dev = NULL;
2251 list[ns].pci_dev = pci_dev;
2252 list[ns].pf_bond = bd;
2253 list[ns].ifindex = mlx5_nl_ifindex
2255 mlx5_os_get_dev_device_name
2256 (list[ns].phys_dev), i);
2257 if (!list[ns].ifindex) {
2259 * No network interface index found for the
2260 * specified port, it means there is no
2261 * representor on this port. It's OK,
2262 * there can be disabled ports, for example
2263 * if sriov_numvfs < sriov_totalvfs.
2269 ret = mlx5_nl_switch_info
2273 if (ret || (!list[ns].info.representor &&
2274 !list[ns].info.master)) {
2276 * We failed to recognize representors with
2277 * Netlink, let's try to perform the task
2280 ret = mlx5_sysfs_switch_info
2284 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2285 if (!ret && bd >= 0) {
2286 switch (list[ns].info.name_type) {
2287 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2288 if (list[ns].info.port_name == bd)
2291 case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
2293 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2295 case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
2296 if (list[ns].info.pf_num == bd)
2305 if (!ret && (list[ns].info.representor ^
2306 list[ns].info.master))
2311 "unable to recognize master/representors"
2312 " on the IB device with multiple ports");
2319 * The existence of several matching entries (nd > 1) means
2320 * port representors have been instantiated. No existing Verbs
2321 * call nor sysfs entry can tell them apart; this can only
2322 * be done through Netlink calls assuming kernel drivers are
2323 * recent enough to support them.
2325 * In the event of identification failure through Netlink,
2326 * try again through sysfs, then:
2328 * 1. If a single IB device matches (nd == 1) with a single
2329 * port (np = 0/1) and is not a representor, assume
2330 * no switch support.
2332 * 2. Otherwise no safe assumptions can be made;
2333 * complain louder and bail out.
2335 for (i = 0; i != nd; ++i) {
2336 memset(&list[ns].info, 0, sizeof(list[ns].info));
2337 list[ns].bond_info = NULL;
2338 list[ns].max_port = 1;
2339 list[ns].phys_port = 1;
2340 list[ns].phys_dev = ibv_match[i];
2341 list[ns].eth_dev = NULL;
2342 list[ns].pci_dev = pci_dev;
2343 list[ns].pf_bond = -1;
2344 list[ns].ifindex = 0;
2346 list[ns].ifindex = mlx5_nl_ifindex
2348 mlx5_os_get_dev_device_name
2349 (list[ns].phys_dev), 1);
2350 if (!list[ns].ifindex) {
2351 char ifname[IF_NAMESIZE];
2354 * Netlink failed, it may happen with an old
2355 * ib_core kernel driver (before 4.16).
2356 * We can assume an old driver because
2357 * here we are processing single-port IB
2358 * devices. Let's try sysfs to retrieve
2359 * the ifindex. The method works for
2360 * the master device only.
2364 * Multiple devices found, assume
2365 * representors; can not distinguish
2366 * master/representor nor retrieve the
2367 * ifindex via sysfs.
2371 ret = mlx5_get_ifname_sysfs
2372 (ibv_match[i]->ibdev_path, ifname);
2375 if_nametoindex(ifname);
2376 if (!list[ns].ifindex) {
2378 * No network interface index found
2379 * for the specified device, it means
2380 * it is neither a representor
2388 ret = mlx5_nl_switch_info
2392 if (ret || (!list[ns].info.representor &&
2393 !list[ns].info.master)) {
2395 * We failed to recognize representors with
2396 * Netlink, let's try to perform the task
2399 ret = mlx5_sysfs_switch_info
2403 if (!ret && (list[ns].info.representor ^
2404 list[ns].info.master)) {
2406 } else if ((nd == 1) &&
2407 !list[ns].info.representor &&
2408 !list[ns].info.master) {
2410 * Single IB device with
2411 * one physical port and
2412 * an attached network device.
2413 * Maybe SR-IOV is not enabled
2414 * or there are no representors.
2416 DRV_LOG(INFO, "no E-Switch support detected");
2423 "unable to recognize master/representors"
2424 " on the multiple IB devices");
2430 * New kernels may add the switch_id attribute for the case
2431 * there is no E-Switch and we wrongly recognized the
2432 * only device as master. Override this if there is a
2433 * single device with a single port and the new device name
2437 list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
2438 list[0].info.master = 0;
2439 list[0].info.representor = 0;
2444 * Sort list to probe devices in natural order for users' convenience
2445 * (i.e. master first, then representors from lowest to highest ID).
2447 qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2448 /* Device specific configuration. */
2449 switch (pci_dev->id.device_id) {
2450 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2451 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2452 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2453 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2454 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2455 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2456 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
2463 if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
2464 /* Set devargs default values. */
2465 if (eth_da.nb_mh_controllers == 0) {
2466 eth_da.nb_mh_controllers = 1;
2467 eth_da.mh_controllers[0] = 0;
2469 if (eth_da.nb_ports == 0 && ns > 0) {
2470 if (list[0].pf_bond >= 0 && list[0].info.representor)
2471 DRV_LOG(WARNING, "Representor on Bonding device should use pf#vf# syntax: %s",
2472 pci_dev->device.devargs->args);
2473 eth_da.nb_ports = 1;
2474 eth_da.ports[0] = list[0].info.pf_num;
2476 if (eth_da.nb_representor_ports == 0) {
2477 eth_da.nb_representor_ports = 1;
2478 eth_da.representor_ports[0] = 0;
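/*
 * At this point a representor request without explicit controller/pf/
 * port lists has been normalized to a single controller (0), a single
 * PF taken from the first spawn entry, and representor port 0.
 */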
2481 for (i = 0; i != ns; ++i) {
2484 /* Default configuration. */
2485 memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
2486 dev_config.vf = dev_config_vf;
2487 dev_config.mps = MLX5_ARG_UNSET;
2488 dev_config.dbnc = MLX5_ARG_UNSET;
2489 dev_config.rx_vec_en = 1;
2490 dev_config.txq_inline_max = MLX5_ARG_UNSET;
2491 dev_config.txq_inline_min = MLX5_ARG_UNSET;
2492 dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
2493 dev_config.txqs_inline = MLX5_ARG_UNSET;
2494 dev_config.vf_nl_en = 1;
2495 dev_config.mr_ext_memseg_en = 1;
2496 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2497 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2498 dev_config.dv_esw_en = 1;
2499 dev_config.dv_flow_en = 1;
2500 dev_config.decap_en = 1;
2501 dev_config.log_hp_size = MLX5_ARG_UNSET;
2502 dev_config.allow_duplicate_pattern = 1;
2503 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
2507 if (!list[i].eth_dev) {
2508 if (rte_errno != EBUSY && rte_errno != EEXIST)
2510 /* Device is disabled or already spawned. Ignore it. */
2513 restore = list[i].eth_dev->data->dev_flags;
2514 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2515 /* Restore non-PCI flags cleared by the above call. */
2516 list[i].eth_dev->data->dev_flags |= restore;
2517 rte_eth_dev_probing_finish(list[i].eth_dev);
2521 "probe of PCI device " PCI_PRI_FMT " aborted after"
2522 " encountering an error: %s",
2523 owner_pci.domain, owner_pci.bus,
2524 owner_pci.devid, owner_pci.function,
2525 strerror(rte_errno));
2529 if (!list[i].eth_dev)
2531 mlx5_dev_close(list[i].eth_dev);
2532 /* mac_addrs must not be freed because it is part of dev_private */
2533 list[i].eth_dev->data->mac_addrs = NULL;
2534 claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2536 /* Restore original error. */
2543 * Do the routine cleanup:
2544 * - close opened Netlink sockets
2545 * - free allocated spawn data array
2546 * - free the Infiniband device list
2554 MLX5_ASSERT(ibv_list);
2555 mlx5_glue->free_device_list(ibv_list);
2560 * DPDK callback to register a PCI device.
2562 * This function spawns Ethernet devices out of a given PCI device.
2564 * @param[in] pci_drv
2565 * PCI driver structure (mlx5_driver).
2566 * @param[in] pci_dev
2567 * PCI device information.
2570 * 0 on success, a negative errno value otherwise and rte_errno is set.
2573 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2574 struct rte_pci_device *pci_dev)
2576 struct rte_eth_devargs eth_da = { .type = RTE_ETH_REPRESENTOR_NONE };
2580 if (pci_dev->device.devargs) {
2581 /* Parse representor information from device argument. */
2582 if (pci_dev->device.devargs->cls_str)
2583 ret = rte_eth_devargs_parse
2584 (pci_dev->device.devargs->cls_str, &eth_da);
2586 DRV_LOG(ERR, "failed to parse device arguments: %s",
2587 pci_dev->device.devargs->cls_str);
2590 if (eth_da.type == RTE_ETH_REPRESENTOR_NONE) {
2591 /* Support legacy device argument */
2592 ret = rte_eth_devargs_parse
2593 (pci_dev->device.devargs->args, &eth_da);
2595 DRV_LOG(ERR, "failed to parse device arguments: %s",
2596 pci_dev->device.devargs->args);
2602 if (eth_da.nb_ports > 0) {
2603 /* Iterate all ports if the devargs pf is a range: "pf[0-1]vf[...]". */
2604 for (p = 0; p < eth_da.nb_ports; p++)
2605 ret = mlx5_os_pci_probe_pf(pci_dev, &eth_da,
2608 ret = mlx5_os_pci_probe_pf(pci_dev, &eth_da, 0);
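/*
 * Note on the dispatch above: a devargs such as
 * "representor=pf[0-1]vf[...]" (hypothetical example) parses into
 * eth_da.nb_ports == 2, so mlx5_os_pci_probe_pf() runs once per owner
 * PF; without a pf list the single call with owner 0 is used.
 */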
2614 mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
2619 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2620 /* Get the original environment variable value to restore it later. */
2621 env = getenv(MLX5_SHUT_UP_BF);
2622 value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
2623 if (config->dbnc == MLX5_ARG_UNSET)
2624 setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
2626 setenv(MLX5_SHUT_UP_BF,
2627 config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
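/*
 * Summary of the mapping above: dbnc unset -> MLX5_SHUT_UP_BF gets the
 * compiled-in default; dbnc == MLX5_TXDB_NCACHED -> "1"; any other
 * explicit value -> "0". The previous variable state is kept in
 * 'value' so mlx5_restore_doorbell_mapping_env() can put it back once
 * the device has been opened.
 */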
2632 mlx5_restore_doorbell_mapping_env(int value)
2634 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2635 /* Restore the original environment variable state. */
2636 if (value == MLX5_ARG_UNSET)
2637 unsetenv(MLX5_SHUT_UP_BF);
2639 setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
2643 * Extract pdn of PD object using DV API.
2646 * Pointer to the verbs PD object.
2648 * Pointer to the PD object number variable.
2651 * 0 on success, error value otherwise.
2654 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
2656 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2657 struct mlx5dv_obj obj;
2658 struct mlx5dv_pd pd_info;
2662 obj.pd.out = &pd_info;
2663 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
2665 DRV_LOG(DEBUG, "Fail to get PD object info");
2674 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
2678 * Function API to open IB device.
2680 * This function calls the Linux glue APIs to open a device.
2683 * Pointer to the IB device attributes (name, port, etc).
2684 * @param[out] config
2685 * Pointer to device configuration structure.
2687 * Pointer to shared context structure.
2690 * 0 on success, a positive error value otherwise.
2693 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
2694 const struct mlx5_dev_config *config,
2695 struct mlx5_dev_ctx_shared *sh)
2700 sh->numa_node = spawn->pci_dev->device.numa_node;
2701 pthread_mutex_init(&sh->txpp.mutex, NULL);
2703 * Configure environment variable "MLX5_SHUT_UP_BF"
2704 * before the device creation. The rdma_core library
2705 * checks the variable at device creation and
2706 * stores the result internally.
2708 dbmap_env = mlx5_config_doorbell_mapping_env(config);
2709 /* Try to open IB device with DV first, then usual Verbs. */
2711 sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
2714 DRV_LOG(DEBUG, "DevX is supported");
2715 /* The device is created, no need for environment. */
2716 mlx5_restore_doorbell_mapping_env(dbmap_env);
2718 /* The environment variable is still configured. */
2719 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
2720 err = errno ? errno : ENODEV;
2722 * The environment variable is not needed anymore,
2723 * all device creation attempts are completed.
2725 mlx5_restore_doorbell_mapping_env(dbmap_env);
2728 DRV_LOG(DEBUG, "DevX is NOT supported");
2731 if (!err && sh->ctx) {
2732 /* Hint libmlx5 to use PMD allocator for data plane resources */
2733 mlx5_glue->dv_set_context_attr(sh->ctx,
2734 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2735 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
2736 .alloc = &mlx5_alloc_verbs_buf,
2737 .free = &mlx5_free_verbs_buf,
2745 * Install shared asynchronous device events handler.
2746 * This function is implemented to support event sharing
2747 * between multiple ports of a single IB device.
2750 * Pointer to mlx5_dev_ctx_shared object.
2753 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
2758 sh->intr_handle.fd = -1;
2759 flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
2760 ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
2761 F_SETFL, flags | O_NONBLOCK);
2763 DRV_LOG(INFO, "failed to change file descriptor async event"
2766 sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
2767 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
2768 if (rte_intr_callback_register(&sh->intr_handle,
2769 mlx5_dev_interrupt_handler, sh)) {
2770 DRV_LOG(INFO, "Fail to install the shared interrupt.");
2771 sh->intr_handle.fd = -1;
2775 #ifdef HAVE_IBV_DEVX_ASYNC
2776 sh->intr_handle_devx.fd = -1;
2778 (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
2779 struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
2781 DRV_LOG(INFO, "failed to allocate devx_comp.");
2784 flags = fcntl(devx_comp->fd, F_GETFL);
2785 ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
2787 DRV_LOG(INFO, "failed to change file descriptor"
2791 sh->intr_handle_devx.fd = devx_comp->fd;
2792 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
2793 if (rte_intr_callback_register(&sh->intr_handle_devx,
2794 mlx5_dev_interrupt_handler_devx, sh)) {
2795 DRV_LOG(INFO, "Fail to install the devx shared"
2797 sh->intr_handle_devx.fd = -1;
2799 #endif /* HAVE_IBV_DEVX_ASYNC */
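/*
 * Note: up to two handlers may end up registered here - one on the IB
 * context async_fd for shared IBV events and, when DevX async support
 * is compiled in, one on the DevX command completion channel fd; both
 * descriptors are switched to non-blocking mode first.
 */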
2804 * Uninstall shared asynchronous device events handler.
2805 * This function is implemented to support event sharing
2806 * between multiple ports of a single IB device.
2809 * Pointer to mlx5_dev_ctx_shared object.
2812 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
2814 if (sh->intr_handle.fd >= 0)
2815 mlx5_intr_callback_unregister(&sh->intr_handle,
2816 mlx5_dev_interrupt_handler, sh);
2817 #ifdef HAVE_IBV_DEVX_ASYNC
2818 if (sh->intr_handle_devx.fd >= 0)
2819 rte_intr_callback_unregister(&sh->intr_handle_devx,
2820 mlx5_dev_interrupt_handler_devx, sh);
2822 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
2827 * Read statistics by a named counter.
2830 * Pointer to the private device data structure.
2831 * @param[in] ctr_name
2832 * Pointer to the name of the statistic counter to read
2834 * Pointer to read statistic value.
2836 * 0 on success and stat is valid, 1 if failed to read the value
2841 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2847 if (priv->q_counters != NULL &&
2848 strcmp(ctr_name, "out_of_buffer") == 0)
2849 return mlx5_devx_cmd_queue_counter_query
2850 (priv->q_counters, 0, (uint32_t *)stat);
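/*
 * Otherwise fall back to the kernel hw_counters sysfs files, e.g.
 * (typical layout, shown for illustration)
 * /sys/class/infiniband/mlx5_0/ports/1/hw_counters/out_of_buffer.
 */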
2851 MKSTR(path, "%s/ports/%d/hw_counters/%s",
2852 priv->sh->ibdev_path,
2855 fd = open(path, O_RDONLY);
2857 * in switchdev the file location is not per port
2858 * but rather in <ibdev_path>/hw_counters/<file_name>.
2861 MKSTR(path1, "%s/hw_counters/%s",
2862 priv->sh->ibdev_path,
2864 fd = open(path1, O_RDONLY);
2867 char buf[21] = {'\0'};
2868 ssize_t n = read(fd, buf, sizeof(buf) - 1); /* keep a terminating NUL */
2872 *stat = strtoull(buf, NULL, 10);
2882 * Set the reg_mr and dereg_mr callbacks
2884 * @param[out] reg_mr_cb
2885 * Pointer to reg_mr func
2886 * @param[out] dereg_mr_cb
2887 * Pointer to dereg_mr func
2891 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
2892 mlx5_dereg_mr_t *dereg_mr_cb)
2894 *reg_mr_cb = mlx5_mr_verbs_ops.reg_mr;
2895 *dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr;
2899 * Remove a MAC address from device
2902 * Pointer to Ethernet device structure.
2904 * MAC address index.
2907 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2909 struct mlx5_priv *priv = dev->data->dev_private;
2910 const int vf = priv->config.vf;
2913 mlx5_nl_mac_addr_remove(priv->nl_socket_route,
2914 mlx5_ifindex(dev), priv->mac_own,
2915 &dev->data->mac_addrs[index], index);
2919 * Adds a MAC address to the device
2922 * Pointer to Ethernet device structure.
2924 * MAC address to register.
2926 * MAC address index.
2929 * 0 on success, a negative errno value otherwise
2932 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2935 struct mlx5_priv *priv = dev->data->dev_private;
2936 const int vf = priv->config.vf;
2940 ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
2941 mlx5_ifindex(dev), priv->mac_own,
2947 * Modify a VF MAC address
2950 * Pointer to device private data.
2952 * MAC address to modify into.
2954 * Net device interface index
2959 * 0 on success, a negative errno value otherwise
2962 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
2963 unsigned int iface_idx,
2964 struct rte_ether_addr *mac_addr,
2967 return mlx5_nl_vf_mac_addr_modify
2968 (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
2972 * Set device promiscuous mode
2975 * Pointer to Ethernet device structure.
2977 * 0 - promiscuous is disabled, otherwise - enabled
2980 * 0 on success, a negative error value otherwise
2983 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
2985 struct mlx5_priv *priv = dev->data->dev_private;
2987 return mlx5_nl_promisc(priv->nl_socket_route,
2988 mlx5_ifindex(dev), !!enable);
2992 * Set device allmulti mode
2995 * Pointer to Ethernet device structure.
2997 * 0 - all multicast is disabled, otherwise - enabled
3000 * 0 on success, a negative error value otherwise
3003 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
3005 struct mlx5_priv *priv = dev->data->dev_private;
3007 return mlx5_nl_allmulti(priv->nl_socket_route,
3008 mlx5_ifindex(dev), !!enable);
3012 * Flush device MAC addresses
3015 * Pointer to Ethernet device structure.
3019 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
3021 struct mlx5_priv *priv = dev->data->dev_private;
3023 mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
3024 dev->data->mac_addrs,
3025 MLX5_MAX_MAC_ADDRESSES, priv->mac_own);