1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
13 #include <linux/rtnetlink.h>
14 #include <linux/sockios.h>
15 #include <linux/ethtool.h>
18 #include <rte_malloc.h>
19 #include <ethdev_driver.h>
20 #include <ethdev_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_common.h>
24 #include <rte_kvargs.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_string_fns.h>
28 #include <rte_alarm.h>
29 #include <rte_eal_paging.h>
31 #include <mlx5_glue.h>
32 #include <mlx5_devx_cmds.h>
33 #include <mlx5_common.h>
34 #include <mlx5_common_mp.h>
35 #include <mlx5_common_mr.h>
36 #include <mlx5_malloc.h>
38 #include "mlx5_defs.h"
40 #include "mlx5_common_os.h"
41 #include "mlx5_utils.h"
42 #include "mlx5_rxtx.h"
43 #include "mlx5_autoconf.h"
45 #include "mlx5_flow.h"
46 #include "rte_pmd_mlx5.h"
47 #include "mlx5_verbs.h"
49 #include "mlx5_devx.h"
51 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
53 #ifndef HAVE_IBV_MLX5_MOD_MPW
54 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
55 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
58 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
59 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
62 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
64 /* Spinlock for mlx5_shared_data allocation. */
65 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
67 /* Process local data for secondary processes. */
68 static struct mlx5_local_data mlx5_local_data;
71 * Set the completion channel file descriptor interrupt as non-blocking.
74 * Pointer to RQ channel object, which includes the channel fd
77 * The file descriptor (representing the interrupt) used in this channel.
80 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
83 mlx5_os_set_nonblock_channel_fd(int fd)
87 flags = fcntl(fd, F_GETFL);
88 return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
92 * Get mlx5 device attributes. The glue function query_device_ex() is called
93 * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5
94 * device attributes are then filled in from the glue out parameter.
97 * Pointer to ibv context.
100 * Pointer to mlx5 device attributes.
103 * 0 on success, a non-zero error number otherwise.
106 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
109 struct ibv_device_attr_ex attr_ex;
110 memset(device_attr, 0, sizeof(*device_attr));
111 err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
115 device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
116 device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
117 device_attr->max_sge = attr_ex.orig_attr.max_sge;
118 device_attr->max_cq = attr_ex.orig_attr.max_cq;
119 device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
120 device_attr->max_mr = attr_ex.orig_attr.max_mr;
121 device_attr->max_pd = attr_ex.orig_attr.max_pd;
122 device_attr->max_qp = attr_ex.orig_attr.max_qp;
123 device_attr->max_srq = attr_ex.orig_attr.max_srq;
124 device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
125 device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
126 device_attr->max_rwq_indirection_table_size =
127 attr_ex.rss_caps.max_rwq_indirection_table_size;
128 device_attr->max_tso = attr_ex.tso_caps.max_tso;
129 device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
131 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
132 err = mlx5_glue->dv_query_device(ctx, &dv_attr);
136 device_attr->flags = dv_attr.flags;
137 device_attr->comp_mask = dv_attr.comp_mask;
138 #ifdef HAVE_IBV_MLX5_MOD_SWP
139 device_attr->sw_parsing_offloads =
140 dv_attr.sw_parsing_caps.sw_parsing_offloads;
142 device_attr->min_single_stride_log_num_of_bytes =
143 dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
144 device_attr->max_single_stride_log_num_of_bytes =
145 dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
146 device_attr->min_single_wqe_log_num_of_strides =
147 dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
148 device_attr->max_single_wqe_log_num_of_strides =
149 dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
150 device_attr->stride_supported_qpts =
151 dv_attr.striding_rq_caps.supported_qpts;
152 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
153 device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
160 * Verbs callback to allocate memory. This function should allocate the
161 * requested amount of space residing inside a huge page.
162 * Please note that all allocations must respect the alignment from libmlx5
163 * (i.e. currently rte_mem_page_size()).
166 * The size in bytes of the memory to allocate.
168 * A pointer to the callback data.
171 * Allocated buffer on success, NULL otherwise and rte_errno is set.
174 mlx5_alloc_verbs_buf(size_t size, void *data)
176 struct mlx5_dev_ctx_shared *sh = data;
178 size_t alignment = rte_mem_page_size();
179 if (alignment == (size_t)-1) {
180 DRV_LOG(ERR, "Failed to get mem page size");
185 MLX5_ASSERT(data != NULL);
186 ret = mlx5_malloc(0, size, alignment, sh->numa_node);
193 * Verbs callback to free memory.
196 * A pointer to the memory to free.
198 * A pointer to the callback data.
201 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
203 MLX5_ASSERT(data != NULL);
208 * Initialize DR-related data within the private structure.
209 * The routine checks the reference counter and performs the actual
210 * resource creation/initialization only if the counter is zero.
213 * Pointer to the private device data structure.
216 * Zero on success, positive error code otherwise.
219 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
221 struct mlx5_dev_ctx_shared *sh = priv->sh;
222 char s[MLX5_HLIST_NAMESIZE] __rte_unused;
225 MLX5_ASSERT(sh && sh->refcnt);
228 err = mlx5_alloc_table_hash_list(priv);
231 /* The resources below are only valid with DV support. */
232 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
233 /* Init port id action cache list. */
234 snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
235 mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
236 flow_dv_port_id_create_cb,
237 flow_dv_port_id_match_cb,
238 flow_dv_port_id_remove_cb);
239 /* Init push vlan action cache list. */
240 snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
241 mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
242 flow_dv_push_vlan_create_cb,
243 flow_dv_push_vlan_match_cb,
244 flow_dv_push_vlan_remove_cb);
245 /* Init sample action cache list. */
246 snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
247 mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
248 flow_dv_sample_create_cb,
249 flow_dv_sample_match_cb,
250 flow_dv_sample_remove_cb);
251 /* Init dest array action cache list. */
252 snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
253 mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
254 flow_dv_dest_array_create_cb,
255 flow_dv_dest_array_match_cb,
256 flow_dv_dest_array_remove_cb);
257 /* Create tags hash list table. */
258 snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
259 sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
260 MLX5_HLIST_WRITE_MOST,
261 flow_dv_tag_create_cb,
262 flow_dv_tag_match_cb,
263 flow_dv_tag_remove_cb);
264 if (!sh->tag_table) {
265 DRV_LOG(ERR, "tags hash list creation failed.");
269 sh->tag_table->ctx = sh;
270 snprintf(s, sizeof(s), "%s_hdr_modify", sh->ibdev_name);
271 sh->modify_cmds = mlx5_hlist_create(s, MLX5_FLOW_HDR_MODIFY_HTABLE_SZ,
272 0, MLX5_HLIST_WRITE_MOST |
273 MLX5_HLIST_DIRECT_KEY,
274 flow_dv_modify_create_cb,
275 flow_dv_modify_match_cb,
276 flow_dv_modify_remove_cb);
277 if (!sh->modify_cmds) {
278 DRV_LOG(ERR, "hdr modify hash creation failed");
282 sh->modify_cmds->ctx = sh;
283 snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
284 sh->encaps_decaps = mlx5_hlist_create(s,
285 MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
286 0, MLX5_HLIST_DIRECT_KEY |
287 MLX5_HLIST_WRITE_MOST,
288 flow_dv_encap_decap_create_cb,
289 flow_dv_encap_decap_match_cb,
290 flow_dv_encap_decap_remove_cb);
291 if (!sh->encaps_decaps) {
292 DRV_LOG(ERR, "encap decap hash creation failed");
296 sh->encaps_decaps->ctx = sh;
298 #ifdef HAVE_MLX5DV_DR
301 /* Reference counter is zero, we should initialize structures. */
302 domain = mlx5_glue->dr_create_domain(sh->ctx,
303 MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
305 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
309 sh->rx_domain = domain;
310 domain = mlx5_glue->dr_create_domain(sh->ctx,
311 MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
313 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
317 sh->tx_domain = domain;
318 #ifdef HAVE_MLX5DV_DR_ESWITCH
319 if (priv->config.dv_esw_en) {
320 domain = mlx5_glue->dr_create_domain
321 (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
323 DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
327 sh->fdb_domain = domain;
328 sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
332 err = mlx5_alloc_tunnel_hub(sh);
334 DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
337 if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
338 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
339 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
341 mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
343 sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
344 #endif /* HAVE_MLX5DV_DR */
345 sh->default_miss_action =
346 mlx5_glue->dr_create_flow_action_default_miss();
347 if (!sh->default_miss_action)
348 DRV_LOG(WARNING, "Default miss action is not supported.");
351 /* Rollback the created objects. */
353 mlx5_glue->dr_destroy_domain(sh->rx_domain);
354 sh->rx_domain = NULL;
357 mlx5_glue->dr_destroy_domain(sh->tx_domain);
358 sh->tx_domain = NULL;
360 if (sh->fdb_domain) {
361 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
362 sh->fdb_domain = NULL;
364 if (sh->esw_drop_action) {
365 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
366 sh->esw_drop_action = NULL;
368 if (sh->pop_vlan_action) {
369 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
370 sh->pop_vlan_action = NULL;
372 if (sh->encaps_decaps) {
373 mlx5_hlist_destroy(sh->encaps_decaps);
374 sh->encaps_decaps = NULL;
376 if (sh->modify_cmds) {
377 mlx5_hlist_destroy(sh->modify_cmds);
378 sh->modify_cmds = NULL;
381 /* Tags table should have been destroyed with the flows beforehand. */
382 mlx5_hlist_destroy(sh->tag_table);
383 sh->tag_table = NULL;
385 if (sh->tunnel_hub) {
386 mlx5_release_tunnel_hub(sh, priv->dev_port);
387 sh->tunnel_hub = NULL;
389 mlx5_free_table_hash_list(priv);
394 * Destroy DR related data within private structure.
397 * Pointer to the private device data structure.
400 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
402 struct mlx5_dev_ctx_shared *sh = priv->sh;
404 MLX5_ASSERT(sh && sh->refcnt);
407 #ifdef HAVE_MLX5DV_DR
409 mlx5_glue->dr_destroy_domain(sh->rx_domain);
410 sh->rx_domain = NULL;
413 mlx5_glue->dr_destroy_domain(sh->tx_domain);
414 sh->tx_domain = NULL;
416 #ifdef HAVE_MLX5DV_DR_ESWITCH
417 if (sh->fdb_domain) {
418 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
419 sh->fdb_domain = NULL;
421 if (sh->esw_drop_action) {
422 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
423 sh->esw_drop_action = NULL;
426 if (sh->pop_vlan_action) {
427 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
428 sh->pop_vlan_action = NULL;
430 #endif /* HAVE_MLX5DV_DR */
431 if (sh->default_miss_action)
432 mlx5_glue->destroy_flow_action
433 (sh->default_miss_action);
434 if (sh->encaps_decaps) {
435 mlx5_hlist_destroy(sh->encaps_decaps);
436 sh->encaps_decaps = NULL;
438 if (sh->modify_cmds) {
439 mlx5_hlist_destroy(sh->modify_cmds);
440 sh->modify_cmds = NULL;
443 /* Tags table should have been destroyed with the flows beforehand. */
444 mlx5_hlist_destroy(sh->tag_table);
445 sh->tag_table = NULL;
447 if (sh->tunnel_hub) {
448 mlx5_release_tunnel_hub(sh, priv->dev_port);
449 sh->tunnel_hub = NULL;
451 mlx5_cache_list_destroy(&sh->port_id_action_list);
452 mlx5_cache_list_destroy(&sh->push_vlan_action_list);
453 mlx5_free_table_hash_list(priv);
457 * Initialize shared data between the primary and secondary processes.
459 * A memzone is reserved by the primary process and secondary processes attach to
463 * 0 on success, a negative errno value otherwise and rte_errno is set.
466 mlx5_init_shared_data(void)
468 const struct rte_memzone *mz;
471 rte_spinlock_lock(&mlx5_shared_data_lock);
472 if (mlx5_shared_data == NULL) {
473 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
474 /* Allocate shared memory. */
475 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
476 sizeof(*mlx5_shared_data),
480 "Cannot allocate mlx5 shared data");
484 mlx5_shared_data = mz->addr;
485 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
486 rte_spinlock_init(&mlx5_shared_data->lock);
488 /* Lookup allocated shared memory. */
489 mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
492 "Cannot attach mlx5 shared data");
496 mlx5_shared_data = mz->addr;
497 memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
501 rte_spinlock_unlock(&mlx5_shared_data_lock);
506 * PMD global initialization.
508 * Independent of any individual device, this function initializes global
509 * per-PMD data structures, distinguishing primary and secondary processes.
510 * Hence, each initialization is called once per process.
513 * 0 on success, a negative errno value otherwise and rte_errno is set.
518 struct mlx5_shared_data *sd;
519 struct mlx5_local_data *ld = &mlx5_local_data;
522 if (mlx5_init_shared_data())
524 sd = mlx5_shared_data;
526 rte_spinlock_lock(&sd->lock);
527 switch (rte_eal_process_type()) {
528 case RTE_PROC_PRIMARY:
531 LIST_INIT(&sd->mem_event_cb_list);
532 rte_rwlock_init(&sd->mem_event_rwlock);
533 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
534 mlx5_mr_mem_event_cb, NULL);
535 ret = mlx5_mp_init_primary(MLX5_MP_NAME,
536 mlx5_mp_os_primary_handle);
539 sd->init_done = true;
541 case RTE_PROC_SECONDARY:
544 ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
545 mlx5_mp_os_secondary_handle);
549 ld->init_done = true;
555 rte_spinlock_unlock(&sd->lock);
560 * Create the Tx queue DevX/Verbs object.
563 * Pointer to Ethernet device.
565 * Queue index in DPDK Tx queue array.
568 * 0 on success, a negative errno value otherwise and rte_errno is set.
571 mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
573 struct mlx5_priv *priv = dev->data->dev_private;
574 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
575 struct mlx5_txq_ctrl *txq_ctrl =
576 container_of(txq_data, struct mlx5_txq_ctrl, txq);
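/*
 * Hairpin queues are always created through DevX. Otherwise, DevX is
 * preferred when the UAR offset is exposed (HAVE_MLX5DV_DEVX_UAR_OFFSET)
 * and E-Switch is disabled, with Verbs used as the fallback.
 */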
578 if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
579 return mlx5_txq_devx_obj_new(dev, idx);
580 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
581 if (!priv->config.dv_esw_en)
582 return mlx5_txq_devx_obj_new(dev, idx);
584 return mlx5_txq_ibv_obj_new(dev, idx);
588 * Release a Tx DevX/Verbs queue object.
591 * DevX/Verbs Tx queue object.
594 mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
596 if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
597 mlx5_txq_devx_obj_release(txq_obj);
600 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
601 if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
602 mlx5_txq_devx_obj_release(txq_obj);
606 mlx5_txq_ibv_obj_release(txq_obj);
610 * Detect and configure the DV flow counter mode.
613 * Pointer to rte_eth_dev structure.
617 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
619 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
620 struct mlx5_priv *priv = dev->data->dev_private;
621 struct mlx5_dev_ctx_shared *sh = priv->sh;
624 #ifndef HAVE_IBV_DEVX_ASYNC
628 if (!priv->config.devx || !priv->config.dv_flow_en ||
629 !priv->config.hca_attr.flow_counters_dump ||
630 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
631 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
635 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
636 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
637 priv->config.hca_attr.flow_counters_dump,
638 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
639 /* Initialize fallback mode only on the port that initializes sh. */
641 sh->cmng.counter_fallback = fallback;
642 else if (fallback != sh->cmng.counter_fallback)
643 DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
644 "with others:%d.", PORT_ID(priv), fallback);
649 mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
651 struct mlx5_priv *priv = dev->data->dev_private;
652 void *ctx = priv->sh->ctx;
654 priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
655 if (!priv->q_counters) {
656 struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
659 DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
660 "by DevX - fall-back to use the kernel driver global "
661 "queue counter.", dev->data->port_id);
662 /* Create WQ by kernel and query its queue counter ID. */
664 wq = mlx5_glue->create_wq(ctx,
665 &(struct ibv_wq_init_attr){
666 .wq_type = IBV_WQT_RQ,
673 /* Counter is assigned only in RDY state. */
674 int ret = mlx5_glue->modify_wq(wq,
675 &(struct ibv_wq_attr){
676 .attr_mask = IBV_WQ_ATTR_STATE,
677 .wq_state = IBV_WQS_RDY,
681 mlx5_devx_cmd_wq_query(wq,
682 &priv->counter_set_id);
683 claim_zero(mlx5_glue->destroy_wq(wq));
685 claim_zero(mlx5_glue->destroy_cq(cq));
688 priv->counter_set_id = priv->q_counters->id;
690 if (priv->counter_set_id == 0)
691 DRV_LOG(INFO, "Part of the port %d statistics will not be "
692 "available.", dev->data->port_id);
696 * Check if the representor spawn info matches the devargs.
699 * Verbs device parameters (name, port, switch_info) to spawn.
701 * Device devargs to probe.
707 mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
708 struct rte_eth_devargs *eth_da)
710 struct mlx5_switch_info *switch_info = &spawn->info;
713 uint16_t repr_id = mlx5_representor_id_encode(switch_info);
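/*
 * First make sure the devargs representor type matches the port name
 * type reported by the kernel, then match the encoded representor ID
 * against each PF/representor pair listed in the devargs.
 */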
715 switch (eth_da->type) {
716 case RTE_ETH_REPRESENTOR_SF:
717 if (switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF) {
722 case RTE_ETH_REPRESENTOR_VF:
723 /* Allow HPF representor index -1 as an exception. */
724 if (!(spawn->info.port_name == -1 &&
725 switch_info->name_type ==
726 MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
727 switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF) {
732 case RTE_ETH_REPRESENTOR_NONE:
737 DRV_LOG(ERR, "unsupported representor type");
740 /* Check representor ID: */
741 for (p = 0; p < eth_da->nb_ports; ++p) {
742 if (spawn->pf_bond < 0) {
743 /* For non-LAG mode, allow and ignore pf. */
744 switch_info->pf_num = eth_da->ports[p];
745 repr_id = mlx5_representor_id_encode(switch_info);
747 for (f = 0; f < eth_da->nb_representor_ports; ++f) {
748 id = MLX5_REPRESENTOR_ID
749 (eth_da->ports[p], eth_da->type,
750 eth_da->representor_ports[f]);
761 * Spawn an Ethernet device from Verbs information.
764 * Backing DPDK device.
766 * Verbs device parameters (name, port, switch_info) to spawn.
768 * Device configuration parameters.
773 * A valid Ethernet device object on success, NULL otherwise and rte_errno
774 * is set. The following errors are defined:
776 * EBUSY: device is not supposed to be spawned.
777 * EEXIST: device is already spawned
779 static struct rte_eth_dev *
780 mlx5_dev_spawn(struct rte_device *dpdk_dev,
781 struct mlx5_dev_spawn_data *spawn,
782 struct mlx5_dev_config *config,
783 struct rte_eth_devargs *eth_da)
785 const struct mlx5_switch_info *switch_info = &spawn->info;
786 struct mlx5_dev_ctx_shared *sh = NULL;
787 struct ibv_port_attr port_attr;
788 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
789 struct rte_eth_dev *eth_dev = NULL;
790 struct mlx5_priv *priv = NULL;
792 unsigned int hw_padding = 0;
794 unsigned int tunnel_en = 0;
795 unsigned int mpls_en = 0;
796 unsigned int swp = 0;
797 unsigned int mprq = 0;
798 unsigned int mprq_min_stride_size_n = 0;
799 unsigned int mprq_max_stride_size_n = 0;
800 unsigned int mprq_min_stride_num_n = 0;
801 unsigned int mprq_max_stride_num_n = 0;
802 struct rte_ether_addr mac;
803 char name[RTE_ETH_NAME_MAX_LEN];
804 int own_domain_id = 0;
806 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
807 struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
810 /* Determine if this port representor is supposed to be spawned. */
811 if (switch_info->representor && dpdk_dev->devargs &&
812 !mlx5_representor_match(spawn, eth_da))
814 /* Build device name. */
815 if (spawn->pf_bond < 0) {
817 if (!switch_info->representor)
818 strlcpy(name, dpdk_dev->name, sizeof(name));
820 err = snprintf(name, sizeof(name), "%s_representor_%s%u",
822 switch_info->name_type ==
823 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
824 switch_info->port_name);
826 /* Bonding device. */
827 if (!switch_info->representor) {
828 err = snprintf(name, sizeof(name), "%s_%s",
830 mlx5_os_get_dev_device_name(spawn->phys_dev));
832 err = snprintf(name, sizeof(name), "%s_%s_representor_c%dpf%d%s%u",
834 mlx5_os_get_dev_device_name(spawn->phys_dev),
835 switch_info->ctrl_num,
837 switch_info->name_type ==
838 MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
839 switch_info->port_name);
842 if (err >= (int)sizeof(name))
843 DRV_LOG(WARNING, "device name overflow %s", name);
844 /* Check if the device is already spawned. */
845 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
849 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
850 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
851 struct mlx5_mp_id mp_id;
853 eth_dev = rte_eth_dev_attach_secondary(name);
854 if (eth_dev == NULL) {
855 DRV_LOG(ERR, "can not attach rte ethdev");
859 eth_dev->device = dpdk_dev;
860 eth_dev->dev_ops = &mlx5_dev_sec_ops;
861 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
862 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
863 err = mlx5_proc_priv_init(eth_dev);
866 mp_id.port_id = eth_dev->data->port_id;
867 strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
868 /* Receive command fd from primary process */
869 err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
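/* On success, err holds the Verbs command fd shared by the primary process. */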
872 /* Remap UAR for Tx queues. */
873 err = mlx5_tx_uar_init_secondary(eth_dev, err);
877 * Ethdev pointer is still required as input since
878 * the primary device is not accessible from the
881 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
882 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
885 mlx5_dev_close(eth_dev);
889 * Some parameters ("tx_db_nc" in particular) are needed in
890 * advance to create the dv/verbs device context. We process the
891 * devargs here to get them, and process the devargs again later
892 * to override some hardware settings.
894 err = mlx5_args(config, dpdk_dev->devargs);
897 DRV_LOG(ERR, "failed to process device arguments: %s",
898 strerror(rte_errno));
901 if (config->dv_miss_info) {
902 if (switch_info->master || switch_info->representor)
903 config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
905 mlx5_malloc_mem_select(config->sys_mem_en);
906 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
909 config->devx = sh->devx;
910 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
911 config->dest_tir = 1;
913 #ifdef HAVE_IBV_MLX5_MOD_SWP
914 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
917 * Multi-packet send is supported by ConnectX-4 Lx PF as well
918 * as all ConnectX-5 devices.
920 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
921 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
923 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
924 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
926 mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
927 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
928 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
929 DRV_LOG(DEBUG, "enhanced MPW is supported");
930 mps = MLX5_MPW_ENHANCED;
932 DRV_LOG(DEBUG, "MPW is supported");
936 DRV_LOG(DEBUG, "MPW isn't supported");
937 mps = MLX5_MPW_DISABLED;
939 #ifdef HAVE_IBV_MLX5_MOD_SWP
940 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
941 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
942 DRV_LOG(DEBUG, "SWP support: %u", swp);
945 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
946 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
947 struct mlx5dv_striding_rq_caps mprq_caps =
948 dv_attr.striding_rq_caps;
950 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
951 mprq_caps.min_single_stride_log_num_of_bytes);
952 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
953 mprq_caps.max_single_stride_log_num_of_bytes);
954 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
955 mprq_caps.min_single_wqe_log_num_of_strides);
956 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
957 mprq_caps.max_single_wqe_log_num_of_strides);
958 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
959 mprq_caps.supported_qpts);
960 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
962 mprq_min_stride_size_n =
963 mprq_caps.min_single_stride_log_num_of_bytes;
964 mprq_max_stride_size_n =
965 mprq_caps.max_single_stride_log_num_of_bytes;
966 mprq_min_stride_num_n =
967 mprq_caps.min_single_wqe_log_num_of_strides;
968 mprq_max_stride_num_n =
969 mprq_caps.max_single_wqe_log_num_of_strides;
972 /* Rx CQE compression is enabled by default. */
973 config->cqe_comp = 1;
974 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
975 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
976 tunnel_en = ((dv_attr.tunnel_offloads_caps &
977 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
978 (dv_attr.tunnel_offloads_caps &
979 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
980 (dv_attr.tunnel_offloads_caps &
981 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
983 DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
984 tunnel_en ? "" : "not ");
987 "tunnel offloading disabled due to old OFED/rdma-core version");
989 config->tunnel_en = tunnel_en;
990 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
991 mpls_en = ((dv_attr.tunnel_offloads_caps &
992 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
993 (dv_attr.tunnel_offloads_caps &
994 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
995 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
996 mpls_en ? "" : "not ");
998 DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
999 " old OFED/rdma-core version or firmware configuration");
1001 config->mpls_en = mpls_en;
1002 /* Check port status. */
1003 err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
1005 DRV_LOG(ERR, "port query failed: %s", strerror(err));
1008 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1009 DRV_LOG(ERR, "port is not configured in Ethernet mode");
1013 if (port_attr.state != IBV_PORT_ACTIVE)
1014 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
1015 mlx5_glue->port_state_str(port_attr.state),
1017 /* Allocate private eth device data. */
1018 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1020 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1022 DRV_LOG(ERR, "priv allocation failure");
1027 priv->dev_port = spawn->phys_port;
1028 priv->pci_dev = spawn->pci_dev;
1029 priv->mtu = RTE_ETHER_MTU;
1030 /* Some internal functions rely on Netlink sockets, open them now. */
1031 priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
1032 priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
1033 priv->representor = !!switch_info->representor;
1034 priv->master = !!switch_info->master;
1035 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1036 priv->vport_meta_tag = 0;
1037 priv->vport_meta_mask = 0;
1038 priv->pf_bond = spawn->pf_bond;
1039 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
1041 * The DevX port query API is implemented. E-Switch may use
1042 * either vport or reg_c[0] metadata register to match on
1043 * vport index. The engaged part of metadata register is
1046 if (switch_info->representor || switch_info->master) {
1047 devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
1048 MLX5DV_DEVX_PORT_MATCH_REG_C_0;
1049 err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
1053 "can't query devx port %d on device %s",
1055 mlx5_os_get_dev_device_name(spawn->phys_dev));
1056 devx_port.comp_mask = 0;
1059 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
1060 priv->vport_meta_tag = devx_port.reg_c_0.value;
1061 priv->vport_meta_mask = devx_port.reg_c_0.mask;
1062 if (!priv->vport_meta_mask) {
1063 DRV_LOG(ERR, "vport zero mask for port %d"
1064 " on bonding device %s",
1066 mlx5_os_get_dev_device_name
1071 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1072 DRV_LOG(ERR, "invalid vport tag for port %d"
1073 " on bonding device %s",
1075 mlx5_os_get_dev_device_name
1081 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
1082 priv->vport_id = devx_port.vport_num;
1083 } else if (spawn->pf_bond >= 0) {
1084 DRV_LOG(ERR, "can't deduce vport index for port %d"
1085 " on bonding device %s",
1087 mlx5_os_get_dev_device_name(spawn->phys_dev));
1091 /* Derive the vport index in the backward-compatible way. */
1092 priv->vport_id = switch_info->representor ?
1093 switch_info->port_name + 1 : -1;
1097 * Kernel/rdma_core supports single E-Switch per PF configurations
1098 * only and the vport_id field contains the vport index for the
1099 * associated VF, which is deduced from the representor port name.
1100 * For example, let the IB device port 10 have an attached network
1101 * device eth0 whose port name attribute is pf0vf2: we can deduce
1102 * the VF number as 2 and set the vport index as 3 (2 + 1).
1103 * This assignment schema should be changed if configurations with
1104 * multiple E-Switch instances per PF and/or PCI subfunctions
1105 * are added.
1107 priv->vport_id = switch_info->representor ?
1108 switch_info->port_name + 1 : -1;
1110 priv->representor_id = mlx5_representor_id_encode(switch_info);
1112 * Look for sibling devices in order to reuse their switch domain
1113 * if any, otherwise allocate one.
1115 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
1116 const struct mlx5_priv *opriv =
1117 rte_eth_devices[port_id].data->dev_private;
1120 opriv->sh != priv->sh ||
1122 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1124 priv->domain_id = opriv->domain_id;
1127 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1128 err = rte_eth_switch_domain_alloc(&priv->domain_id);
1131 DRV_LOG(ERR, "unable to allocate switch domain: %s",
1132 strerror(rte_errno));
1137 /* Override some values set by hardware configuration. */
1138 mlx5_args(config, dpdk_dev->devargs);
1139 err = mlx5_dev_check_sibling_config(priv, config);
1142 config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
1143 IBV_DEVICE_RAW_IP_CSUM);
1144 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
1145 (config->hw_csum ? "" : "not "));
1146 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
1147 !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1148 DRV_LOG(DEBUG, "counters are not supported");
1150 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
1151 if (config->dv_flow_en) {
1152 DRV_LOG(WARNING, "DV flow is not supported");
1153 config->dv_flow_en = 0;
1156 config->ind_table_max_size =
1157 sh->device_attr.max_rwq_indirection_table_size;
1159 * Remove this check once DPDK supports larger/variable
1160 * indirection tables.
1162 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
1163 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
1164 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
1165 config->ind_table_max_size);
1166 config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
1167 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
1168 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
1169 (config->hw_vlan_strip ? "" : "not "));
1170 config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
1171 IBV_RAW_PACKET_CAP_SCATTER_FCS);
1172 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1173 hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
1174 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1175 hw_padding = !!(sh->device_attr.device_cap_flags_ex &
1176 IBV_DEVICE_PCI_WRITE_END_PADDING);
1178 if (config->hw_padding && !hw_padding) {
1179 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
1180 config->hw_padding = 0;
1181 } else if (config->hw_padding) {
1182 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
1184 config->tso = (sh->device_attr.max_tso > 0 &&
1185 (sh->device_attr.tso_supported_qpts &
1186 (1 << IBV_QPT_RAW_PACKET)));
1188 config->tso_max_payload_sz = sh->device_attr.max_tso;
1190 * MPW is disabled by default, while the Enhanced MPW is enabled
1193 if (config->mps == MLX5_ARG_UNSET)
1194 config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
1197 config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
1198 DRV_LOG(INFO, "%sMPS is %s",
1199 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
1200 config->mps == MLX5_MPW ? "legacy " : "",
1201 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
1203 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
1208 /* Check relaxed ordering support. */
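/* Relaxed ordering is left disabled on Haswell/Broadwell CPUs, where it is reported to be counterproductive. */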
1209 if (!haswell_broadwell_cpu) {
1210 sh->cmng.relaxed_ordering_write =
1211 config->hca_attr.relaxed_ordering_write;
1212 sh->cmng.relaxed_ordering_read =
1213 config->hca_attr.relaxed_ordering_read;
1215 sh->cmng.relaxed_ordering_read = 0;
1216 sh->cmng.relaxed_ordering_write = 0;
1218 sh->rq_ts_format = config->hca_attr.rq_ts_format;
1219 sh->sq_ts_format = config->hca_attr.sq_ts_format;
1220 sh->qp_ts_format = config->hca_attr.qp_ts_format;
1221 /* Check for LRO support. */
1222 if (config->dest_tir && config->hca_attr.lro_cap &&
1223 config->dv_flow_en) {
1224 /* TBD check tunnel lro caps. */
1225 config->lro.supported = config->hca_attr.lro_cap;
1226 DRV_LOG(DEBUG, "Device supports LRO");
1228 * If LRO timeout is not configured by application,
1229 * use the minimal supported value.
1231 if (!config->lro.timeout)
1232 config->lro.timeout =
1233 config->hca_attr.lro_timer_supported_periods[0];
1234 DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
1235 config->lro.timeout);
1236 DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
1237 "required for coalescing is %d bytes",
1238 config->hca_attr.lro_min_mss_size);
1240 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
1241 if (config->hca_attr.qos.sup &&
1242 config->hca_attr.qos.flow_meter_old &&
1243 config->dv_flow_en) {
1244 uint8_t reg_c_mask =
1245 config->hca_attr.qos.flow_meter_reg_c_ids;
1247 * Meter needs two REG_C's for color match and pre-sfx
1248 * flow match. Here get the REG_C for color match.
1249 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
1252 if (__builtin_popcount(reg_c_mask) < 1) {
1254 DRV_LOG(WARNING, "No available register for"
1258 * The meter color register is used by the
1259 * flow-hit feature as well.
1260 * The flow-hit feature must use REG_C_3.
1261 * Prefer REG_C_3 if it is available.
1263 if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
1264 priv->mtr_color_reg = REG_C_3;
1266 priv->mtr_color_reg = ffs(reg_c_mask)
1269 priv->mtr_reg_share =
1270 config->hca_attr.qos.flow_meter;
1271 DRV_LOG(DEBUG, "The REG_C used by the meter is %d",
1272 priv->mtr_color_reg);
1276 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
1277 if (config->hca_attr.flow_hit_aso &&
1278 priv->mtr_color_reg == REG_C_3) {
1279 sh->flow_hit_aso_en = 1;
1280 err = mlx5_flow_aso_age_mng_init(sh);
1285 DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
1287 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1288 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1289 if (config->hca_attr.log_max_ft_sampler_num > 0 &&
1290 config->dv_flow_en) {
1291 priv->sampler_en = 1;
1292 DRV_LOG(DEBUG, "Sampler enabled!");
1294 priv->sampler_en = 0;
1295 if (!config->hca_attr.log_max_ft_sampler_num)
1297 "No available register for sampler.");
1299 DRV_LOG(DEBUG, "DV flow is not supported!");
1303 if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
1304 !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
1305 DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
1306 config->cqe_comp = 0;
1308 if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
1309 (!config->devx || !config->hca_attr.mini_cqe_resp_flow_tag)) {
1310 DRV_LOG(WARNING, "Flow Tag CQE compression"
1311 " format isn't supported.");
1312 config->cqe_comp = 0;
1314 if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
1315 (!config->devx || !config->hca_attr.mini_cqe_resp_l3_l4_tag)) {
1316 DRV_LOG(WARNING, "L3/L4 Header CQE compression"
1317 " format isn't supported.");
1318 config->cqe_comp = 0;
1320 DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
1321 config->cqe_comp ? "" : "not ");
1322 if (config->tx_pp) {
1323 DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
1324 config->hca_attr.dev_freq_khz);
1325 DRV_LOG(DEBUG, "Packet pacing is %ssupported",
1326 config->hca_attr.qos.packet_pacing ? "" : "not ");
1327 DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
1328 config->hca_attr.cross_channel ? "" : "not ");
1329 DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
1330 config->hca_attr.wqe_index_ignore ? "" : "not ");
1331 DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
1332 config->hca_attr.non_wire_sq ? "" : "not ");
1333 DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
1334 config->hca_attr.log_max_static_sq_wq ? "" : "not ",
1335 config->hca_attr.log_max_static_sq_wq);
1336 DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
1337 config->hca_attr.qos.wqe_rate_pp ? "" : "not ");
1338 if (!config->devx) {
1339 DRV_LOG(ERR, "DevX is required for packet pacing");
1343 if (!config->hca_attr.qos.packet_pacing) {
1344 DRV_LOG(ERR, "Packet pacing is not supported");
1348 if (!config->hca_attr.cross_channel) {
1349 DRV_LOG(ERR, "Cross channel operations are"
1350 " required for packet pacing");
1354 if (!config->hca_attr.wqe_index_ignore) {
1355 DRV_LOG(ERR, "WQE index ignore feature is"
1356 " required for packet pacing");
1360 if (!config->hca_attr.non_wire_sq) {
1361 DRV_LOG(ERR, "Non-wire SQ feature is"
1362 " required for packet pacing");
1366 if (!config->hca_attr.log_max_static_sq_wq) {
1367 DRV_LOG(ERR, "Static WQE SQ feature is"
1368 " required for packet pacing");
1372 if (!config->hca_attr.qos.wqe_rate_pp) {
1373 DRV_LOG(ERR, "WQE rate mode is required"
1374 " for packet pacing");
1378 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1379 DRV_LOG(ERR, "DevX does not provide UAR offset,"
1380 " can't create queues for packet pacing");
1386 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
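/*
 * Detect the timestamp mode: read the MTUTC register when user
 * register access is allowed; otherwise infer real-time mode from
 * a reported 1GHz device frequency.
 */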
1388 err = config->hca_attr.access_register_user ?
1389 mlx5_devx_cmd_register_read
1390 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
1391 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
1395 /* MTUTC register was read successfully. */
1396 ts_mode = MLX5_GET(register_mtutc, reg,
1398 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
1399 config->rt_timestamp = 1;
1401 /* Kernel does not support register reading. */
1402 if (config->hca_attr.dev_freq_khz ==
1403 (NS_PER_S / MS_PER_S))
1404 config->rt_timestamp = 1;
1408 * If the HW has a bug combining tunnel packet decapsulation with
1409 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
1410 * bit. Then the DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
1412 if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
1413 config->hw_fcs_strip = 0;
1414 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
1415 (config->hw_fcs_strip ? "" : "not "));
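/*
 * Validate the devargs-provided Multi-Packet RQ stride parameters
 * against the device capabilities and fall back to the defaults
 * when they are out of range.
 */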
1416 if (config->mprq.enabled && mprq) {
1417 if (config->mprq.stride_num_n &&
1418 (config->mprq.stride_num_n > mprq_max_stride_num_n ||
1419 config->mprq.stride_num_n < mprq_min_stride_num_n)) {
1420 config->mprq.stride_num_n =
1421 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
1422 mprq_min_stride_num_n),
1423 mprq_max_stride_num_n);
1425 "the number of strides"
1426 " for Multi-Packet RQ is out of range,"
1427 " setting default value (%u)",
1428 1 << config->mprq.stride_num_n);
1430 if (config->mprq.stride_size_n &&
1431 (config->mprq.stride_size_n > mprq_max_stride_size_n ||
1432 config->mprq.stride_size_n < mprq_min_stride_size_n)) {
1433 config->mprq.stride_size_n =
1434 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
1435 mprq_min_stride_size_n),
1436 mprq_max_stride_size_n);
1438 "the size of a stride"
1439 " for Multi-Packet RQ is out of range,"
1440 " setting default value (%u)",
1441 1 << config->mprq.stride_size_n);
1443 config->mprq.min_stride_size_n = mprq_min_stride_size_n;
1444 config->mprq.max_stride_size_n = mprq_max_stride_size_n;
1445 } else if (config->mprq.enabled && !mprq) {
1446 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
1447 config->mprq.enabled = 0;
1449 if (config->max_dump_files_num == 0)
1450 config->max_dump_files_num = 128;
1451 eth_dev = rte_eth_dev_allocate(name);
1452 if (eth_dev == NULL) {
1453 DRV_LOG(ERR, "can not allocate rte ethdev");
1457 if (priv->representor) {
1458 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1459 eth_dev->data->representor_id = priv->representor_id;
1461 priv->mp_id.port_id = eth_dev->data->port_id;
1462 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
1464 * Store the associated network device interface index. This index
1465 * is permanent throughout the lifetime of the device, so we may store
1466 * the ifindex here and use the cached value later.
1468 MLX5_ASSERT(spawn->ifindex);
1469 priv->if_index = spawn->ifindex;
1470 if (priv->pf_bond >= 0 && priv->master) {
1471 /* Get bond interface info */
1472 err = mlx5_sysfs_bond_info(priv->if_index,
1473 &priv->bond_ifindex,
1476 DRV_LOG(ERR, "unable to get bond info: %s",
1477 strerror(rte_errno));
1479 DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
1480 priv->if_index, priv->bond_ifindex,
1483 eth_dev->data->dev_private = priv;
1484 priv->dev_data = eth_dev->data;
1485 eth_dev->data->mac_addrs = priv->mac;
1486 eth_dev->device = dpdk_dev;
1487 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1488 /* Configure the first MAC address by default. */
1489 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1491 "port %u cannot get MAC address, is mlx5_en"
1492 " loaded? (errno: %s)",
1493 eth_dev->data->port_id, strerror(rte_errno));
1498 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1499 eth_dev->data->port_id,
1500 mac.addr_bytes[0], mac.addr_bytes[1],
1501 mac.addr_bytes[2], mac.addr_bytes[3],
1502 mac.addr_bytes[4], mac.addr_bytes[5]);
1503 #ifdef RTE_LIBRTE_MLX5_DEBUG
1505 char ifname[MLX5_NAMESIZE];
1507 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1508 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1509 eth_dev->data->port_id, ifname);
1511 DRV_LOG(DEBUG, "port %u ifname is unknown",
1512 eth_dev->data->port_id);
1515 /* Get actual MTU if possible. */
1516 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1521 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1523 /* Initialize burst functions to prevent crashes before link-up. */
1524 eth_dev->rx_pkt_burst = removed_rx_burst;
1525 eth_dev->tx_pkt_burst = removed_tx_burst;
1526 eth_dev->dev_ops = &mlx5_dev_ops;
1527 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1528 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1529 eth_dev->rx_queue_count = mlx5_rx_queue_count;
1530 /* Register MAC address. */
1531 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
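/* For VFs with Netlink enabled, synchronize the MAC address list with the kernel netdevice. */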
1532 if (config->vf && config->vf_nl_en)
1533 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1534 mlx5_ifindex(eth_dev),
1535 eth_dev->data->mac_addrs,
1536 MLX5_MAX_MAC_ADDRESSES);
1538 priv->ctrl_flows = 0;
1539 rte_spinlock_init(&priv->flow_list_lock);
1540 TAILQ_INIT(&priv->flow_meters);
1541 TAILQ_INIT(&priv->flow_meter_profiles);
1542 /* Hint libmlx5 to use PMD allocator for data plane resources */
1543 mlx5_glue->dv_set_context_attr(sh->ctx,
1544 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1545 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
1546 .alloc = &mlx5_alloc_verbs_buf,
1547 .free = &mlx5_free_verbs_buf,
1550 /* Bring Ethernet device up. */
1551 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1552 eth_dev->data->port_id);
1553 mlx5_set_link_up(eth_dev);
1555 * Even though the interrupt handler is not installed yet,
1556 * interrupts will still trigger on the async_fd from the
1557 * Verbs context returned by ibv_open_device().
1559 mlx5_link_update(eth_dev, 0);
1560 #ifdef HAVE_MLX5DV_DR_ESWITCH
1561 if (!(config->hca_attr.eswitch_manager && config->dv_flow_en &&
1562 (switch_info->representor || switch_info->master)))
1563 config->dv_esw_en = 0;
1565 config->dv_esw_en = 0;
1567 /* Detect minimal data bytes to inline. */
1568 mlx5_set_min_inline(spawn, config);
1569 /* Store device configuration on private structure. */
1570 priv->config = *config;
1571 /* Create context for virtual machine VLAN workaround. */
1572 priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1573 if (config->dv_flow_en) {
1574 err = mlx5_alloc_shared_dr(priv);
1578 if (config->devx && config->dv_flow_en && config->dest_tir) {
1579 priv->obj_ops = devx_obj_ops;
1580 priv->obj_ops.drop_action_create =
1581 ibv_obj_ops.drop_action_create;
1582 priv->obj_ops.drop_action_destroy =
1583 ibv_obj_ops.drop_action_destroy;
1584 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1585 priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
1587 if (config->dv_esw_en)
1588 priv->obj_ops.txq_obj_modify =
1589 ibv_obj_ops.txq_obj_modify;
1591 /* Use specific wrappers for Tx object. */
1592 priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
1593 priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
1594 mlx5_queue_counter_id_prepare(eth_dev);
1597 priv->obj_ops = ibv_obj_ops;
1599 priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
1600 if (!priv->drop_queue.hrxq)
1602 /* Detect the number of supported Verbs flow priorities. */
1603 err = mlx5_flow_discover_priorities(eth_dev);
1608 priv->config.flow_prio = err;
1609 if (!priv->config.dv_esw_en &&
1610 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1611 DRV_LOG(WARNING, "metadata mode %u is not supported "
1612 "(no E-Switch)", priv->config.dv_xmeta_en);
1613 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1615 mlx5_set_metadata_mask(eth_dev);
1616 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1617 !priv->sh->dv_regc0_mask) {
1618 DRV_LOG(ERR, "metadata mode %u is not supported "
1619 "(no metadata reg_c[0] is available)",
1620 priv->config.dv_xmeta_en);
1624 mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
1625 mlx5_hrxq_create_cb,
1627 mlx5_hrxq_remove_cb);
1628 /* Query availability of metadata reg_c's. */
1629 err = mlx5_flow_discover_mreg_c(eth_dev);
1634 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1636 "port %u extensive metadata register is not supported",
1637 eth_dev->data->port_id);
1638 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1639 DRV_LOG(ERR, "metadata mode %u is not supported "
1640 "(no metadata registers available)",
1641 priv->config.dv_xmeta_en);
1646 if (priv->config.dv_flow_en &&
1647 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1648 mlx5_flow_ext_mreg_supported(eth_dev) &&
1649 priv->sh->dv_regc0_mask) {
1650 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1651 MLX5_FLOW_MREG_HTABLE_SZ,
1653 flow_dv_mreg_create_cb,
1654 flow_dv_mreg_match_cb,
1655 flow_dv_mreg_remove_cb);
1656 if (!priv->mreg_cp_tbl) {
1660 priv->mreg_cp_tbl->ctx = eth_dev;
1662 rte_spinlock_init(&priv->shared_act_sl);
1663 mlx5_flow_counter_mode_config(eth_dev);
1664 if (priv->config.dv_flow_en)
1665 eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1669 if (priv->mreg_cp_tbl)
1670 mlx5_hlist_destroy(priv->mreg_cp_tbl);
1672 mlx5_os_free_shared_dr(priv);
1673 if (priv->nl_socket_route >= 0)
1674 close(priv->nl_socket_route);
1675 if (priv->nl_socket_rdma >= 0)
1676 close(priv->nl_socket_rdma);
1677 if (priv->vmwa_context)
1678 mlx5_vlan_vmwa_exit(priv->vmwa_context);
1679 if (eth_dev && priv->drop_queue.hrxq)
1680 mlx5_drop_action_destroy(eth_dev);
1682 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1683 mlx5_cache_list_destroy(&priv->hrxqs);
1685 if (eth_dev != NULL)
1686 eth_dev->data->dev_private = NULL;
1688 if (eth_dev != NULL) {
1689 /* mac_addrs must not be freed alone because part of
1692 eth_dev->data->mac_addrs = NULL;
1693 rte_eth_dev_release_port(eth_dev);
1696 mlx5_free_shared_dev_ctx(sh);
1697 MLX5_ASSERT(err > 0);
1703 * Comparison callback to sort device data.
1705 * This is meant to be used with qsort().
1708 * Pointer to pointer to first data object.
1710 * Pointer to pointer to second data object.
1713 * 0 if both objects are equal, less than 0 if the first argument is less
1714 * than the second, greater than 0 otherwise.
1717 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1719 const struct mlx5_switch_info *si_a =
1720 &((const struct mlx5_dev_spawn_data *)a)->info;
1721 const struct mlx5_switch_info *si_b =
1722 &((const struct mlx5_dev_spawn_data *)b)->info;
1725 /* Master device first. */
1726 ret = si_b->master - si_a->master;
1729 /* Then representor devices. */
1730 ret = si_b->representor - si_a->representor;
1733 /* Unidentified devices come last in no specific order. */
1734 if (!si_a->representor)
1736 /* Order representors by name. */
1737 return si_a->port_name - si_b->port_name;
1741 * Match PCI information for possible slaves of bonding device.
1743 * @param[in] ibv_dev
1744 * Pointer to Infiniband device structure.
1745 * @param[in] pci_dev
1746 * Pointer to primary PCI address structure to match.
1747 * @param[in] nl_rdma
1748 * Netlink RDMA group socket handle.
1750 * Representor owner PF index.
1753 * negative value if no bonding device found, otherwise
1754 * positive index of slave PF in bonding.
1757 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1758 const struct rte_pci_addr *pci_dev,
1759 int nl_rdma, uint16_t owner)
1761 char ifname[IF_NAMESIZE + 1];
1762 unsigned int ifindex;
1768 * Try to get master device name. If something goes
1769 * wrong suppose the lack of kernel support and no
1774 if (!strstr(ibv_dev->name, "bond"))
1776 np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
1780 * The Master device might not be on the predefined
1781 * port (not on port index 1, it is not guaranteed),
1782 * we have to scan all Infiniband device port and
1785 for (i = 1; i <= np; ++i) {
1786 /* Check whether Infiniband port is populated. */
1787 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
1790 if (!if_indextoname(ifindex, ifname))
1792 /* Try to read bonding slave names from sysfs. */
1794 "/sys/class/net/%s/master/bonding/slaves", ifname);
1795 file = fopen(slaves, "r");
1801 /* Use safe format to check maximal buffer length. */
1802 MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1803 while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1804 char tmp_str[IF_NAMESIZE + 32];
1805 struct rte_pci_addr pci_addr;
1806 struct mlx5_switch_info info;
1808 /* Process slave interface names in the loop. */
1809 snprintf(tmp_str, sizeof(tmp_str),
1810 "/sys/class/net/%s", ifname);
1811 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
1812 DRV_LOG(WARNING, "can not get PCI address"
1813 " for netdev \"%s\"", ifname);
1816 if (pci_dev->domain != pci_addr.domain ||
1817 pci_dev->bus != pci_addr.bus ||
1818 pci_dev->devid != pci_addr.devid ||
1819 pci_dev->function + owner != pci_addr.function)
1821 /* Slave interface PCI address match found. */
1823 snprintf(tmp_str, sizeof(tmp_str),
1824 "/sys/class/net/%s/phys_port_name", ifname);
1825 file = fopen(tmp_str, "rb");
1828 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1829 if (fscanf(file, "%32s", tmp_str) == 1)
1830 mlx5_translate_port_name(tmp_str, &info);
1831 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
1832 info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1833 pf = info.port_name;
1842 * DPDK callback to register a PCI device.
1844 * This function spawns Ethernet devices out of a given PCI device.
1846 * @param[in] pci_drv
1847 * PCI driver structure (mlx5_driver).
1848 * @param[in] pci_dev
1849 * PCI device information.
1852 * 0 on success, a negative errno value otherwise and rte_errno is set.
1855 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1856 struct rte_pci_device *pci_dev)
1858 struct ibv_device **ibv_list;
1860 * Number of found IB devices matching the requested PCI BDF.
1861 * nd != 1 means there are multiple IB devices over the same
1862 * PCI device and we have representors and a master.
1864 unsigned int nd = 0;
1866 * Number of found IB device ports. nd = 1 and np = 1..n means
1867 * we have a single multiport IB device, and there may be
1868 * representors attached to some of the found ports.
1870 unsigned int np = 0;
1872 * Number of DPDK ethernet devices to spawn - either over
1873 * multiple IB devices or multiple ports of a single IB device.
1874 * Actually this is the number of spawn iterations to perform.
1876 unsigned int ns = 0;
1879 * < 0 - no bonding device (single one)
1880 * >= 0 - bonding device (value is slave PF index)
1883 struct mlx5_dev_spawn_data *list = NULL;
1884 struct mlx5_dev_config dev_config;
1885 unsigned int dev_config_vf;
1886 struct rte_eth_devargs eth_da = { .type = RTE_ETH_REPRESENTOR_NONE };
1887 struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
1890 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1891 mlx5_pmd_socket_init();
1892 ret = mlx5_init_once();
1894 DRV_LOG(ERR, "unable to init PMD global data: %s",
1895 strerror(rte_errno));
1898 if (pci_dev->device.devargs) {
1899 /* Parse representor information from device argument. */
1900 if (pci_dev->device.devargs->cls_str)
1901 ret = rte_eth_devargs_parse
1902 (pci_dev->device.devargs->cls_str, ð_da);
1904 DRV_LOG(ERR, "failed to parse device arguments: %s",
1905 pci_dev->device.devargs->cls_str);
1908 if (eth_da.type == RTE_ETH_REPRESENTOR_NONE) {
1909 /* Support legacy device argument */
1910 ret = rte_eth_devargs_parse
1911 (pci_dev->device.devargs->args, ð_da);
1913 DRV_LOG(ERR, "failed to parse device arguments: %s",
1914 pci_dev->device.devargs->args);
1920 ibv_list = mlx5_glue->get_device_list(&ret);
1922 rte_errno = errno ? errno : ENOSYS;
1923 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1927 * First scan the list of all Infiniband devices to find
1928 * matching ones, gathering into the list.
1930 struct ibv_device *ibv_match[ret + 1];
1931 int nl_route = mlx5_nl_init(NETLINK_ROUTE);
1932 int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
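/*
 * These Netlink sockets are used below to query port numbers,
 * interface indexes and switch information while identifying
 * master/representor devices; sysfs is used as a fallback.
 */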
1936 struct rte_pci_addr pci_addr;
1938 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
1939 bd = mlx5_device_bond_pci_match
1940 (ibv_list[ret], &owner_pci, nl_rdma,
1944 * Bonding device detected. Only one match is allowed;
1945 * bonding is supported over a multi-port IB device, and
1946 * there should be no matches on representor PCI
1947 * functions or non-VF LAG bonding devices with the
1948 * specified address.
1952 "multiple PCI match on bonding device"
1953 "\"%s\" found", ibv_list[ret]->name);
1958 /* Amend owner pci address if owner PF ID specified. */
1959 if (eth_da.nb_representor_ports)
1960 owner_pci.function += eth_da.ports[0];
1961 DRV_LOG(INFO, "PCI information matches for"
1962 " slave %d bonding device \"%s\"",
1963 bd, ibv_list[ret]->name);
1964 ibv_match[nd++] = ibv_list[ret];
1967 /* Bonding device not found. */
1968 if (mlx5_dev_to_pci_addr
1969 (ibv_list[ret]->ibdev_path, &pci_addr))
1971 if (owner_pci.domain != pci_addr.domain ||
1972 owner_pci.bus != pci_addr.bus ||
1973 owner_pci.devid != pci_addr.devid ||
1974 owner_pci.function != pci_addr.function)
1976 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
1977 ibv_list[ret]->name);
1978 ibv_match[nd++] = ibv_list[ret];
1981 ibv_match[nd] = NULL;
1983 /* No device matches, just complain and bail out. */
1985 "no Verbs device matches PCI device " PCI_PRI_FMT ","
1986 " are kernel drivers loaded?",
1987 owner_pci.domain, owner_pci.bus,
1988 owner_pci.devid, owner_pci.function);
1995 * The single matching device found may have multiple ports.
1996 * Each port may be a representor, so we have to check the port
1997 * number and check for the existence of representors.
2000 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
2002 DRV_LOG(WARNING, "can not get IB device \"%s\""
2003 " ports number", ibv_match[0]->name);
2004 if (bd >= 0 && !np) {
2005 DRV_LOG(ERR, "can not get ports"
2006 " for bonding device");
2012 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
2015 * This may happen if there is VF LAG kernel support and the
2016 * application is compiled with an older rdma_core library.
2019 "No kernel/verbs support for VF LAG bonding found.");
2020 rte_errno = ENOTSUP;
2026 * Now we can determine the maximal
2027 * number of devices to be spawned.
2029 list = mlx5_malloc(MLX5_MEM_ZERO,
2030 sizeof(struct mlx5_dev_spawn_data) *
2032 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
2034 DRV_LOG(ERR, "spawn data array allocation failure");
2039 if (bd >= 0 || np > 1) {
2041 * A single IB device with multiple ports was found;
2042 * it may be an E-Switch master device with representors.
2043 * We have to perform identification through the ports.
2045 MLX5_ASSERT(nl_rdma >= 0);
2046 MLX5_ASSERT(ns == 0);
2047 MLX5_ASSERT(nd == 1);
2049 for (i = 1; i <= np; ++i) {
2050 list[ns].max_port = np;
2051 list[ns].phys_port = i;
2052 list[ns].phys_dev = ibv_match[0];
2053 list[ns].eth_dev = NULL;
2054 list[ns].pci_dev = pci_dev;
2055 list[ns].pf_bond = bd;
2056 list[ns].ifindex = mlx5_nl_ifindex
2058 mlx5_os_get_dev_device_name
2059 (list[ns].phys_dev), i);
2060 if (!list[ns].ifindex) {
2062 * No network interface index was found for the
2063 * specified port, which means there is no
2064 * representor on this port. That's OK,
2065 * there can be disabled ports, for example
2066 * if sriov_numvfs < sriov_totalvfs.
2072 ret = mlx5_nl_switch_info
2076 if (ret || (!list[ns].info.representor &&
2077 !list[ns].info.master)) {
2079 * We failed to recognize representors with
2080 * Netlink, let's try to perform the task with sysfs.
2083 ret = mlx5_sysfs_switch_info
2087 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
2088 if (!ret && bd >= 0) {
2089 switch (list[ns].info.name_type) {
2090 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2091 if (list[ns].info.port_name == bd)
2094 case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
2096 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2098 case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
2099 if (list[ns].info.pf_num == bd)
2108 if (!ret && (list[ns].info.representor ^
2109 list[ns].info.master))
2114 "unable to recognize master/representors"
2115 " on the IB device with multiple ports");
2122 * The existence of several matching entries (nd > 1) means
2123 * port representors have been instantiated. No existing Verbs
2124 * call nor sysfs entries can tell them apart; this can only
2125 * be done through Netlink calls assuming kernel drivers are
2126 * recent enough to support them.
2128 * In the event of identification failure through Netlink,
2129 * try again through sysfs, then:
2131 * 1. A single IB device matches (nd == 1) with a single
2132 *    port (np = 0 or 1) and is not a representor: assume
2133 *    no switch support.
2135 * 2. Otherwise no safe assumptions can be made;
2136 * complain louder and bail out.
2138 for (i = 0; i != nd; ++i) {
2139 memset(&list[ns].info, 0, sizeof(list[ns].info));
2140 list[ns].max_port = 1;
2141 list[ns].phys_port = 1;
2142 list[ns].phys_dev = ibv_match[i];
2143 list[ns].eth_dev = NULL;
2144 list[ns].pci_dev = pci_dev;
2145 list[ns].pf_bond = -1;
2146 list[ns].ifindex = 0;
2148 list[ns].ifindex = mlx5_nl_ifindex
2150 mlx5_os_get_dev_device_name
2151 (list[ns].phys_dev), 1);
2152 if (!list[ns].ifindex) {
2153 char ifname[IF_NAMESIZE];
2156 * Netlink failed; this may happen with an old
2157 * ib_core kernel driver (before 4.16).
2158 * We can assume the driver is old because
2159 * here we are processing single-port IB
2160 * devices. Let's try sysfs to retrieve
2161 * the ifindex. The method works for the
2162 * master device only.
2166 * Multiple devices found: assume they are
2167 * representors; we cannot distinguish
2168 * master from representor nor retrieve the
2169 * ifindex via sysfs.
2173 ret = mlx5_get_ifname_sysfs
2174 (ibv_match[i]->ibdev_path, ifname);
2177 if_nametoindex(ifname);
2178 if (!list[ns].ifindex) {
2180 * No network interface index was found
2181 * for the specified device, which means
2182 * it is neither a representor nor the master.
2190 ret = mlx5_nl_switch_info
2194 if (ret || (!list[ns].info.representor &&
2195 !list[ns].info.master)) {
2197 * We failed to recognize representors with
2198 * Netlink, let's try to perform the task with sysfs.
2201 ret = mlx5_sysfs_switch_info
2205 if (!ret && (list[ns].info.representor ^
2206 list[ns].info.master)) {
2208 } else if ((nd == 1) &&
2209 !list[ns].info.representor &&
2210 !list[ns].info.master) {
2212 * A single IB device with
2213 * one physical port and an
2214 * attached network device.
2215 * Maybe SR-IOV is not enabled
2216 * or there are no representors.
2218 DRV_LOG(INFO, "no E-Switch support detected");
2225 "unable to recognize master/representors"
2226 " on the multiple IB devices");
2234 * Sort the list to probe devices in natural order for the user's convenience
2235 * (i.e. master first, then representors from lowest to highest ID).
2237 qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2238 /* Device specific configuration. */
2239 switch (pci_dev->id.device_id) {
2240 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
2241 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
2242 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
2243 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
2244 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
2245 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
2246 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
2253 if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
2254 /* Set devargs default values. */
2255 if (eth_da.nb_mh_controllers == 0) {
2256 eth_da.nb_mh_controllers = 1;
2257 eth_da.mh_controllers[0] = 0;
2259 if (eth_da.nb_ports == 0 && ns > 0) {
2260 if (list[0].pf_bond >= 0 && list[0].info.representor)
2261 DRV_LOG(WARNING, "Representor on a bonding device should use the pf#vf# syntax: %s",
2262 pci_dev->device.devargs->args);
2263 eth_da.nb_ports = 1;
2264 eth_da.ports[0] = list[0].info.pf_num;
2266 if (eth_da.nb_representor_ports == 0) {
2267 eth_da.nb_representor_ports = 1;
2268 eth_da.representor_ports[0] = 0;
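/*
 * Illustrative note (not from the original code): the defaults set above
 * mirror what an explicit "representor" devargs would request. For example,
 * probing the first VF representor of PF 0 could be spelled on the EAL
 * command line as (hypothetical BDF placeholder):
 *
 *   -a <DBDF>,representor=pf0vf0
 */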
2271 for (i = 0; i != ns; ++i) {
2274 /* Default configuration. */
2275 memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
2276 dev_config.vf = dev_config_vf;
2277 dev_config.mps = MLX5_ARG_UNSET;
2278 dev_config.dbnc = MLX5_ARG_UNSET;
2279 dev_config.rx_vec_en = 1;
2280 dev_config.txq_inline_max = MLX5_ARG_UNSET;
2281 dev_config.txq_inline_min = MLX5_ARG_UNSET;
2282 dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
2283 dev_config.txqs_inline = MLX5_ARG_UNSET;
2284 dev_config.vf_nl_en = 1;
2285 dev_config.mr_ext_memseg_en = 1;
2286 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
2287 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
2288 dev_config.dv_esw_en = 1;
2289 dev_config.dv_flow_en = 1;
2290 dev_config.decap_en = 1;
2291 dev_config.log_hp_size = MLX5_ARG_UNSET;
2292 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
2296 if (!list[i].eth_dev) {
2297 if (rte_errno != EBUSY && rte_errno != EEXIST)
2299 /* Device is disabled or already spawned. Ignore it. */
2302 restore = list[i].eth_dev->data->dev_flags;
2303 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2304 /* Restore non-PCI flags cleared by the above call. */
2305 list[i].eth_dev->data->dev_flags |= restore;
2306 rte_eth_dev_probing_finish(list[i].eth_dev);
2310 "probe of PCI device " PCI_PRI_FMT " aborted after"
2311 " encountering an error: %s",
2312 owner_pci.domain, owner_pci.bus,
2313 owner_pci.devid, owner_pci.function,
2314 strerror(rte_errno));
2318 if (!list[i].eth_dev)
2320 mlx5_dev_close(list[i].eth_dev);
2321 /* mac_addrs must not be freed because it is part of dev_private */
2322 list[i].eth_dev->data->mac_addrs = NULL;
2323 claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2325 /* Restore original error. */
2332 * Do the routine cleanup:
2333 * - close the opened Netlink sockets
2334 * - free the allocated spawn data array
2335 * - free the Infiniband device list
2343 MLX5_ASSERT(ibv_list);
2344 mlx5_glue->free_device_list(ibv_list);
2349 mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
2354 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2355 /* Get the environment variable value to restore it later. */
2356 env = getenv(MLX5_SHUT_UP_BF);
2357 value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
2358 if (config->dbnc == MLX5_ARG_UNSET)
2359 setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
2361 setenv(MLX5_SHUT_UP_BF,
2362 config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
2367 mlx5_restore_doorbell_mapping_env(int value)
2369 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
2370 /* Restore the original environment variable state. */
2371 if (value == MLX5_ARG_UNSET)
2372 unsetenv(MLX5_SHUT_UP_BF);
2374 setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
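/*
 * Sketch (illustrative only, mirroring the usage in mlx5_os_open_device()
 * below): the two helpers above are meant to bracket device creation,
 * because rdma-core samples MLX5_SHUT_UP_BF while the context is created:
 *
 *   int dbmap_env = mlx5_config_doorbell_mapping_env(config);
 *   void *ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
 *   mlx5_restore_doorbell_mapping_env(dbmap_env);
 */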
2378 * Extract the PD object number (pdn) of a PD object using the DV API.
2381 * Pointer to the Verbs PD object.
2383 * Pointer to the PD object number variable.
2386 * 0 on success, error value otherwise.
2389 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
2391 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2392 struct mlx5dv_obj obj;
2393 struct mlx5dv_pd pd_info;
2397 obj.pd.out = &pd_info;
2398 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
2400 DRV_LOG(DEBUG, "Failed to get PD object info");
2409 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
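/*
 * Hedged usage sketch (illustrative, assumes a valid struct ibv_pd pointer
 * `pd` obtained elsewhere):
 *
 *   uint32_t pdn = 0;
 *
 *   if (mlx5_os_get_pdn(pd, &pdn) != 0)
 *       DRV_LOG(DEBUG, "cannot extract the PD object number");
 */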
2413 * Function API to open an IB device.
2415 * This function calls the Linux glue APIs to open a device.
2418 * Pointer to the IB device attributes (name, port, etc).
2419 * @param[in] config
2420 * Pointer to the device configuration structure.
2422 * Pointer to shared context structure.
2425 * 0 on success, a positive error value otherwise.
2428 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
2429 const struct mlx5_dev_config *config,
2430 struct mlx5_dev_ctx_shared *sh)
2435 sh->numa_node = spawn->pci_dev->device.numa_node;
2436 pthread_mutex_init(&sh->txpp.mutex, NULL);
2438 * Configure environment variable "MLX5_SHUT_UP_BF"
2439 * before the device creation. The rdma-core library
2440 * checks the variable at device creation and
2441 * stores the result internally.
2443 dbmap_env = mlx5_config_doorbell_mapping_env(config);
2444 /* Try to open IB device with DV first, then usual Verbs. */
2446 sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
2449 DRV_LOG(DEBUG, "DevX is supported");
2450 /* The device is created; the environment variable is no longer needed. */
2451 mlx5_restore_doorbell_mapping_env(dbmap_env);
2453 /* The environment variable is still configured. */
2454 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
2455 err = errno ? errno : ENODEV;
2457 * The environment variable is not needed anymore,
2458 * all device creation attempts are completed.
2460 mlx5_restore_doorbell_mapping_env(dbmap_env);
2463 DRV_LOG(DEBUG, "DevX is NOT supported");
2466 if (!err && sh->ctx) {
2467 /* Hint libmlx5 to use PMD allocator for data plane resources */
2468 mlx5_glue->dv_set_context_attr(sh->ctx,
2469 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
2470 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
2471 .alloc = &mlx5_alloc_verbs_buf,
2472 .free = &mlx5_free_verbs_buf,
2480 * Install the shared asynchronous device events handler.
2481 * This function is implemented to support event sharing
2482 * between multiple ports of a single IB device.
2485 * Pointer to mlx5_dev_ctx_shared object.
2488 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
2493 sh->intr_handle.fd = -1;
2494 flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
2495 ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
2496 F_SETFL, flags | O_NONBLOCK);
2498 DRV_LOG(INFO, "failed to change file descriptor async event"
2501 sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
2502 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
2503 if (rte_intr_callback_register(&sh->intr_handle,
2504 mlx5_dev_interrupt_handler, sh)) {
2505 DRV_LOG(INFO, "Failed to install the shared interrupt.");
2506 sh->intr_handle.fd = -1;
2510 #ifdef HAVE_IBV_DEVX_ASYNC
2511 sh->intr_handle_devx.fd = -1;
2513 (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
2514 struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
2516 DRV_LOG(INFO, "failed to allocate devx_comp.");
2519 flags = fcntl(devx_comp->fd, F_GETFL);
2520 ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
2522 DRV_LOG(INFO, "failed to change file descriptor"
2526 sh->intr_handle_devx.fd = devx_comp->fd;
2527 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
2528 if (rte_intr_callback_register(&sh->intr_handle_devx,
2529 mlx5_dev_interrupt_handler_devx, sh)) {
2530 DRV_LOG(INFO, "Failed to install the devx shared"
2532 sh->intr_handle_devx.fd = -1;
2534 #endif /* HAVE_IBV_DEVX_ASYNC */
2539 * Uninstall the shared asynchronous device events handler.
2540 * This function is implemented to support event sharing
2541 * between multiple ports of a single IB device.
2544 * Pointer to mlx5_dev_ctx_shared object.
2547 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
2549 if (sh->intr_handle.fd >= 0)
2550 mlx5_intr_callback_unregister(&sh->intr_handle,
2551 mlx5_dev_interrupt_handler, sh);
2552 #ifdef HAVE_IBV_DEVX_ASYNC
2553 if (sh->intr_handle_devx.fd >= 0)
2554 rte_intr_callback_unregister(&sh->intr_handle_devx,
2555 mlx5_dev_interrupt_handler_devx, sh);
2557 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
2562 * Read statistics by a named counter.
2565 * Pointer to the private device data structure.
2566 * @param[in] ctr_name
2567 * Pointer to the name of the statistic counter to read.
2569 * Pointer to the read statistic value.
2571 * 0 on success and stat is valid, 1 if failed to read the value
2576 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2582 if (priv->q_counters != NULL &&
2583 strcmp(ctr_name, "out_of_buffer") == 0)
2584 return mlx5_devx_cmd_queue_counter_query(priv->sh->ctx,
2585 0, (uint32_t *)stat);
2586 MKSTR(path, "%s/ports/%d/hw_counters/%s",
2587 priv->sh->ibdev_path,
2590 fd = open(path, O_RDONLY);
2592 * In switchdev mode the file location is not per port
2593 * but rather in <ibdev_path>/hw_counters/<file_name>.
2596 MKSTR(path1, "%s/hw_counters/%s",
2597 priv->sh->ibdev_path,
2599 fd = open(path1, O_RDONLY);
2602 char buf[21] = {'\0'};
2603 ssize_t n = read(fd, buf, sizeof(buf));
2607 *stat = strtoull(buf, NULL, 10);
2617 * Set the reg_mr and dereg_mr callbacks
2619 * @param[out] reg_mr_cb
2620 * Pointer to the reg_mr function.
2621 * @param[out] dereg_mr_cb
2622 * Pointer to the dereg_mr function.
2626 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
2627 mlx5_dereg_mr_t *dereg_mr_cb)
2629 *reg_mr_cb = mlx5_mr_verbs_ops.reg_mr;
2630 *dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr;
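/*
 * Hedged usage sketch (illustrative): a caller is expected to fetch the
 * Verbs MR callbacks once and reuse them, e.g.:
 *
 *   mlx5_reg_mr_t reg_mr_cb;
 *   mlx5_dereg_mr_t dereg_mr_cb;
 *
 *   mlx5_os_set_reg_mr_cb(&reg_mr_cb, &dereg_mr_cb);
 */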
2634 * Remove a MAC address from the device
2637 * Pointer to Ethernet device structure.
2639 * MAC address index.
2642 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2644 struct mlx5_priv *priv = dev->data->dev_private;
2645 const int vf = priv->config.vf;
2648 mlx5_nl_mac_addr_remove(priv->nl_socket_route,
2649 mlx5_ifindex(dev), priv->mac_own,
2650 &dev->data->mac_addrs[index], index);
2654 * Add a MAC address to the device
2657 * Pointer to Ethernet device structure.
2659 * MAC address to register.
2661 * MAC address index.
2664 * 0 on success, a negative errno value otherwise
2667 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
2670 struct mlx5_priv *priv = dev->data->dev_private;
2671 const int vf = priv->config.vf;
2675 ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
2676 mlx5_ifindex(dev), priv->mac_own,
2682 * Modify a VF MAC address
2685 * Pointer to device private data.
2687 * MAC address to modify into.
2689 * Net device interface index
2694 * 0 on success, a negative errno value otherwise
2697 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
2698 unsigned int iface_idx,
2699 struct rte_ether_addr *mac_addr,
2702 return mlx5_nl_vf_mac_addr_modify
2703 (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
2707 * Set device promiscuous mode
2710 * Pointer to Ethernet device structure.
2712 * 0 - promiscuous is disabled, otherwise - enabled
2715 * 0 on success, a negative error value otherwise
2718 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
2720 struct mlx5_priv *priv = dev->data->dev_private;
2722 return mlx5_nl_promisc(priv->nl_socket_route,
2723 mlx5_ifindex(dev), !!enable);
2727 * Set device allmulticast mode
2730 * Pointer to Ethernet device structure.
2732 * 0 - all multicast is disabled, otherwise - enabled
2735 * 0 on success, a negative error value otherwise
2738 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
2740 struct mlx5_priv *priv = dev->data->dev_private;
2742 return mlx5_nl_allmulti(priv->nl_socket_route,
2743 mlx5_ifindex(dev), !!enable);
2747 * Flush device MAC addresses
2750 * Pointer to Ethernet device structure.
2754 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
2756 struct mlx5_priv *priv = dev->data->dev_private;
2758 mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
2759 dev->data->mac_addrs,
2760 MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
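/*
 * Hedged usage sketch (illustrative): the Netlink-backed MAC helpers above
 * are typically paired across the port life cycle, e.g. removing a single
 * secondary address versus dropping everything on close:
 *
 *   mlx5_os_mac_addr_remove(dev, 1);
 *   mlx5_os_mac_addr_flush(dev);
 */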