/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_windows.h>
#include <ethdev_pci.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* rte flow indexed pool configuration. */
static struct mlx5_indexed_pool_config icfg[] = {
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.type = "ctl_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.per_core_cache = 1 << 14,
		.type = "rte_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.type = "mcp_flow_ipool",
	},
};
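/*
 * Illustrative sketch (compiled out, not part of the driver): how an
 * icfg[] entry above is consumed. mlx5_dev_spawn() below creates one
 * indexed pool per flow type; an allocation returns both a pointer and
 * a 32-bit index, and the index is what flow handles store.
 */
#if 0
static int
example_flow_ipool_usage(void)
{
	struct mlx5_indexed_pool *pool;
	struct rte_flow *flow;
	uint32_t idx = 0;

	pool = mlx5_ipool_create(&icfg[0]);
	if (pool == NULL)
		return -rte_errno;
	/* Allocate a zeroed struct rte_flow and get its pool index. */
	flow = mlx5_ipool_zmalloc(pool, &idx);
	if (flow == NULL)
		return -ENOMEM;
	/* The index maps back to the same pointer until freed. */
	MLX5_ASSERT(mlx5_ipool_get(pool, idx) == flow);
	mlx5_ipool_free(pool, idx);
	return 0;
}
#endif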
/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		/* Allocate shared memory. */
		mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
					 sizeof(*mlx5_shared_data),
					 SOCKET_ID_ANY, 0);
		if (mz == NULL) {
			DRV_LOG(ERR, "Cannot allocate mlx5 shared data");
			ret = -rte_errno;
			goto error;
		}
		mlx5_shared_data = mz->addr;
		memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
		rte_spinlock_init(&mlx5_shared_data->lock);
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}
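/*
 * Illustrative sketch (compiled out, not part of this file): on Linux,
 * where secondary processes are supported, the secondary side attaches
 * to the shared data by looking the memzone up by name instead of
 * reserving it. A minimal version of that pattern:
 */
#if 0
static int
example_attach_shared_data(void)
{
	const struct rte_memzone *mz;

	/* Look up the zone the primary process reserved above. */
	mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	mlx5_shared_data = mz->addr;
	return 0;
}
#endif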
/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	MLX5_ASSERT(sd);
	rte_spinlock_lock(&sd->lock);
	if (!sd->init_done) {
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		sd->init_done = true;
	}
	rte_spinlock_unlock(&sd->lock);
	return 0;
}
/**
 * Get mlx5 device attributes.
 *
 * @param ctx
 *   Pointer to device context.
 * @param device_attr
 *   Pointer to mlx5 device attributes.
 *
 * @return
 *   0 on success, non-zero error number otherwise.
 */
int
mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
{
	struct mlx5_context *mlx5_ctx;
	struct mlx5_hca_attr hca_attr;
	void *pv_iseg = NULL;
	u32 cb_iseg = 0;
	int err = 0;

	if (!ctx)
		return -EINVAL;
	mlx5_ctx = (struct mlx5_context *)ctx;
	memset(device_attr, 0, sizeof(*device_attr));
	err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
	if (err) {
		DRV_LOG(ERR, "Failed to get device hca_cap");
		return err;
	}
	device_attr->max_cq = 1 << hca_attr.log_max_cq;
	device_attr->max_qp = 1 << hca_attr.log_max_qp;
	device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
	device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
	device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
	device_attr->max_pd = 1 << hca_attr.log_max_pd;
	device_attr->max_srq = 1 << hca_attr.log_max_srq;
	device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
	device_attr->max_tso = 1 << hca_attr.max_lso_cap;
	if (hca_attr.rss_ind_tbl_cap) {
		device_attr->max_rwq_indirection_table_size =
			1 << hca_attr.rss_ind_tbl_cap;
	}
	device_attr->sw_parsing_offloads =
		mlx5_get_supported_sw_parsing_offloads(&hca_attr);
	device_attr->tunnel_offloads_caps =
		mlx5_get_supported_tunneling_offloads(&hca_attr);
	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
	if (pv_iseg == NULL) {
		DRV_LOG(ERR, "Failed to get device hca_iseg");
		return errno;
	}
	snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
	return err;
}
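/*
 * Illustrative sketch (compiled out, hypothetical caller): consuming the
 * attributes filled in above. DevX reports most limits log2-encoded, so
 * mlx5_os_get_dev_attr() expands them with `1 << log` before callers
 * ever see them.
 */
#if 0
static void
example_query_dev_attr(void *ctx)
{
	struct mlx5_dev_attr attr;

	if (mlx5_os_get_dev_attr(ctx, &attr) != 0)
		return;
	/* fw_ver was decoded from the initial segment via MLX5_GET(). */
	DRV_LOG(DEBUG, "fw %s, max_qp %d, max_cq %d", attr.fw_ver,
		attr.max_qp, attr.max_cq);
}
#endif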
/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;

	if (!sh->flow_tbls)
		err = mlx5_alloc_table_hash_list(priv);
	else
		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
			(void *)sh->flow_tbls);
	return err;
}
/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	mlx5_free_table_hash_list(priv);
}
/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 * Currently it has no support under Windows.
 *
 * @param[in] rxq_obj
 *   Pointer to RQ channel object, which includes the channel fd.
 * @param[out] fd
 *   The file descriptor (representing the interrupt) used in this channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	(void)fd;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
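/*
 * Illustrative sketch (compiled out, POSIX-only): the Linux counterpart
 * of the stub above makes the completion channel fd non-blocking with
 * fcntl(); shown here only to document the intended semantics.
 */
#if 0
#include <fcntl.h>

static int
example_set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return -errno;
	/* Add O_NONBLOCK so polling the event channel never blocks. */
	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
		return -errno;
	return 0;
}
#endif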
/**
 * DV flow counter mode detect and config.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
static void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!sh->devx || !priv->config.dv_flow_en ||
	    !priv->config.hca_attr.flow_counters_dump ||
	    !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			priv->config.hca_attr.flow_counters_dump,
			priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"from others:%d.", PORT_ID(priv), fallback);
#endif
}
/**
 * Spawn an Ethernet device from DevX information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EEXIST: device is already spawned
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config *config)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct mlx5_dev_attr device_attr;
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int cqe_comp;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	int i;

	/* Build device name. */
	strlcpy(name, dpdk_dev->name, sizeof(name));
	/* check if the device is already spawned */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	/*
	 * Some parameters are needed in advance to create device context. We
	 * process the devargs here to get ones, and later process devargs
	 * again to override some hardware settings.
	 */
	err = mlx5_args(config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
	if (!sh)
		return NULL;
	/* Initialize the shutdown event in mlx5_dev_spawn to
	 * support mlx5_is_removed for Windows.
	 */
	err = mlx5_glue->devx_init_showdown_event(sh->ctx);
	if (err) {
		DRV_LOG(ERR, "failed to init shutdown event: %s",
			strerror(errno));
		goto error;
	}
	DRV_LOG(DEBUG, "MPW isn't supported");
	mlx5_os_get_dev_attr(sh->ctx, &device_attr);
	config->swp = device_attr.sw_parsing_offloads &
		(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
		 MLX5_SW_PARSING_TSO_CAP);
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
	cqe_comp = 0;
	config->cqe_comp = cqe_comp;
	config->tunnel_en = device_attr.tunnel_offloads_caps &
		(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
		 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
		 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
	if (config->tunnel_en) {
		DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
			config->tunnel_en &
			MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
			config->tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
			config->tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
		);
	} else {
		DRV_LOG(DEBUG, "tunnel offloading is not supported");
	}
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported");
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	priv->mp_id.port_id = port_id;
	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
	/* representor_id field keeps the unmodified VF index. */
	priv->representor_id = -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Override some values set by hardware configuration. */
	mlx5_args(config, dpdk_dev->devargs);
	err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
	if (err)
		goto error;
	DRV_LOG(DEBUG, "counters are not supported");
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config->ind_table_max_size);
	if (config->hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
		config->hw_padding = 0;
	}
	config->tso = (sh->device_attr.max_tso > 0);
	if (config->tso)
		config->tso_max_payload_sz = sh->device_attr.max_tso;
	DRV_LOG(DEBUG, "%sMPS is %s.",
		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
		config->mps == MLX5_MPW ? "legacy " : "",
		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config->cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
		config->cqe_comp = 0;
	}
	err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
	if (err) {
		err = -err;
		goto error;
	}
	/* Check relax ordering support. */
	sh->cmng.relaxed_ordering_read = 0;
	sh->cmng.relaxed_ordering_write = 0;
	if (!haswell_broadwell_cpu) {
		sh->cmng.relaxed_ordering_write =
			config->hca_attr.relaxed_ordering_write;
		sh->cmng.relaxed_ordering_read =
			config->hca_attr.relaxed_ordering_read;
	}
	config->hw_csum = config->hca_attr.csum_cap;
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config->hw_csum ? "" : "not "));
	config->hw_vlan_strip = config->hca_attr.vlan_cap;
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config->hw_vlan_strip ? "" : "not "));
	config->hw_fcs_strip = config->hca_attr.scatter_fcs;
	{
		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];

		err = config->hca_attr.access_register_user ?
			mlx5_devx_cmd_register_read
				(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
				 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
		if (!err) {
			uint32_t ts_mode;

			/* MTUTC register is read successfully. */
			ts_mode = MLX5_GET(register_mtutc, reg,
					   time_stamp_mode);
			if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
				config->rt_timestamp = 1;
		} else {
			/* Kernel does not support register reading. */
			if (config->hca_attr.dev_freq_khz ==
			    (NS_PER_S / MS_PER_S))
				config->rt_timestamp = 1;
		}
	}
	sh->rq_ts_format = config->hca_attr.rq_ts_format;
	sh->sq_ts_format = config->hca_attr.sq_ts_format;
	sh->qp_ts_format = config->hca_attr.qp_ts_format;
	if (config->mprq.enabled) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config->mprq.enabled = 0;
	}
	if (config->max_dump_files_num == 0)
		config->max_dump_files_num = 128;
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
		MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;
			if (opriv &&
			    opriv->master &&
			    opriv->domain_id == priv->domain_id &&
			    opriv->sh == priv->sh) {
				eth_dev->data->backer_port_id = port_id;
				break;
			}
		}
		if (port_id >= RTE_MAX_ETHPORTS)
			eth_dev->data->backer_port_id = eth_dev->data->port_id;
	}
	/*
	 * Store associated network device interface index. This index
	 * is permanent throughout the lifetime of device. So, we may store
	 * the ifindex here and use the cached value further.
	 */
	MLX5_ASSERT(spawn->ifindex);
	priv->if_index = spawn->ifindex;
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s).",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
		eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
#ifdef RTE_LIBRTE_MLX5_DEBUG
	{
		char ifname[MLX5_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown.",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
	eth_dev->rx_queue_count = mlx5_rx_queue_count;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	priv->ctrl_flows = 0;
	TAILQ_INIT(&priv->flow_meters);
	priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
	if (!priv->mtr_profile_tbl)
		goto error;
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
		eth_dev->data->port_id);
	/* nl calls are unsupported - set to -1 not to fail on release */
	priv->nl_socket_rdma = -1;
	priv->nl_socket_route = -1;
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	config->dv_esw_en = 0;
	/* Detect minimal data bytes to inline. */
	mlx5_set_min_inline(spawn, config);
	/* Store device configuration on private structure. */
	priv->config = *config;
	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		icfg[i].release_mem_en = !!config->reclaim_mode;
		if (config->reclaim_mode)
			icfg[i].per_core_cache = 0;
		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
		if (!priv->flows[i])
			goto error;
	}
	/* Create context for virtual machine VLAN workaround. */
	priv->vmwa_context = NULL;
	if (config->dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	/* No supported flow priority number detection. */
	priv->config.flow_prio = -1;
	if (!priv->config.dv_esw_en &&
	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		DRV_LOG(WARNING, "metadata mode %u is not supported "
			"(no E-Switch)", priv->config.dv_xmeta_en);
		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
	}
	mlx5_set_metadata_mask(eth_dev);
	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
	    !priv->sh->dv_regc0_mask) {
		DRV_LOG(ERR, "metadata mode %u is not supported "
			"(no metadata reg_c[0] is available).",
			priv->config.dv_xmeta_en);
		err = ENOTSUP;
		goto error;
	}
	priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
				       mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
				       mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
				       mlx5_hrxq_clone_free_cb);
	/* Query availability of metadata reg_c's. */
	err = mlx5_flow_discover_mreg_c(eth_dev);
	if (err < 0) {
		err = -err;
		goto error;
	}
	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
		DRV_LOG(DEBUG,
			"port %u extensive metadata register is not supported.",
			eth_dev->data->port_id);
		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
			DRV_LOG(ERR, "metadata mode %u is not supported "
				"(no metadata registers available).",
				priv->config.dv_xmeta_en);
			err = ENOTSUP;
			goto error;
		}
	}
	if (sh->devx && config->dv_flow_en) {
		priv->obj_ops = devx_obj_ops;
	} else {
		DRV_LOG(ERR, "Flow mode %u is not supported "
			"(Windows flow must be DevX with DV flow enabled).",
			priv->config.dv_flow_en);
		err = ENOTSUP;
		goto error;
	}
	mlx5_flow_counter_mode_config(eth_dev);
	return eth_dev;
error:
	if (priv) {
		if (priv->mtr_profile_tbl)
			mlx5_l3t_destroy(priv->mtr_profile_tbl);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		mlx5_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because part of
		 * dev_private
		 */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_dev_ctx(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}
/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
	(void)sh;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}
/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	(void)sh;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}
/**
 * Read statistics by a named counter.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 * @param[in] ctr_name
 *   Pointer to the name of the statistic counter to read.
 * @param[out] stat
 *   Pointer to read statistic value.
 *
 * @return
 *   0 on success and stat is valid, non-zero otherwise.
 */
int
mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
		      uint64_t *stat)
{
	RTE_SET_USED(priv);
	RTE_SET_USED(ctr_name);
	RTE_SET_USED(stat);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
/**
 * Flush device MAC addresses.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}
/**
 * Remove a MAC address from device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	(void)dev;
	(void)index;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}
/**
 * Adds a MAC address to the device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	(void)index;
	struct rte_ether_addr lmac;

	if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			dev->data->port_id, strerror(rte_errno));
		return rte_errno;
	}
	if (!rte_is_same_ether_addr(&lmac, mac)) {
		DRV_LOG(ERR,
			"adding new mac address to device is unsupported");
		return -ENOTSUP;
	}
	return 0;
}
/**
 * Modify a VF MAC address.
 * Currently it has no support under Windows.
 *
 * @param priv
 *   Pointer to device private data.
 * @param mac_addr
 *   MAC address to modify into.
 * @param iface_idx
 *   Net device interface index.
 * @param vf_index
 *   VF index.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	(void)priv;
	(void)iface_idx;
	(void)mac_addr;
	(void)vf_index;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
/**
 * Set device promiscuous mode.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	(void)dev;
	(void)enable;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
/**
 * Set device allmulti mode.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	(void)dev;
	(void)enable;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given device.
 *
 * @param[in] cdev
 *   Pointer to the common device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_net_probe(struct mlx5_common_device *cdev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
	struct mlx5_dev_spawn_data spawn = {
		.ifindex = -1, /* Spawn will assign */
		.info = (struct mlx5_switch_info){
			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
		},
	};
	struct mlx5_dev_config dev_config = {
		.txq_inline_max = MLX5_ARG_UNSET,
		.txq_inline_min = MLX5_ARG_UNSET,
		.txq_inline_mpw = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.mprq = {
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
		.log_hp_size = MLX5_ARG_UNSET,
	};
	void *ctx = NULL;
	int ret;
	uint32_t restore;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		DRV_LOG(ERR, "Secondary process is not supported on Windows.");
		return -ENOTSUP;
	}
	ret = mlx5_os_open_device(cdev, &ctx);
	if (ret) {
		DRV_LOG(ERR, "Fail to open DevX device %s", cdev->dev->name);
		return -rte_errno;
	}
	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	spawn.phys_dev_name = mlx5_os_get_ctx_device_name(ctx);
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
		dev_config.vf = 1;
		break;
	default:
		dev_config.vf = 0;
		break;
	}
	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
	if (!spawn.eth_dev) {
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	restore = spawn.eth_dev->data->dev_flags;
	rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
	/* Restore non-PCI flags cleared by the above call. */
	spawn.eth_dev->data->dev_flags |= restore;
	rte_eth_dev_probing_finish(spawn.eth_dev);
	return 0;
}
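/*
 * Illustrative sketch (compiled out, hypothetical application code): once
 * the probe above calls rte_eth_dev_probing_finish(), the new port is
 * visible through the standard ethdev iteration API.
 */
#if 0
static void
example_list_probed_ports(void)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		struct rte_eth_dev_info info;

		if (rte_eth_dev_info_get(port_id, &info) == 0)
			printf("port %u: driver %s\n", port_id,
			       info.driver_name);
	}
}
#endif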
/**
 * Set the reg_mr and dereg_mr callbacks.
 *
 * @param reg_mr_cb[out]
 *   Pointer to reg_mr func.
 * @param dereg_mr_cb[out]
 *   Pointer to dereg_mr func.
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
		      mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = mlx5_os_reg_mr;
	*dereg_mr_cb = mlx5_os_dereg_mr;
}
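/*
 * Illustrative sketch (compiled out, hypothetical caller): the common MR
 * layer fetches the OS-specific register/deregister functions through
 * this hook and then calls them indirectly, so it never needs to know
 * which mechanism backs the registration.
 */
#if 0
static void
example_use_mr_callbacks(void)
{
	mlx5_reg_mr_t reg_mr;
	mlx5_dereg_mr_t dereg_mr;

	mlx5_os_set_reg_mr_cb(&reg_mr, &dereg_mr);
	/* reg_mr()/dereg_mr() are now OS-agnostic function pointers. */
}
#endif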
/**
 * Extract pdn of PD object using DevX.
 *
 * @param[in] pd
 *   Pointer to the DevX PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
int
mlx5_os_get_pdn(void *pd, uint32_t *pdn)
{
	if (!pd)
		return -EINVAL;
	*pdn = ((struct mlx5_pd *)pd)->pdn;
	return 0;
}
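/*
 * Illustrative usage (compiled out, hypothetical caller): DevX queue
 * object creation identifies the protection domain by its number rather
 * than by a handle, hence the extraction helper above.
 */
#if 0
static int
example_use_pdn(void *pd)
{
	uint32_t pdn = 0;

	if (mlx5_os_get_pdn(pd, &pdn) != 0)
		return -1;
	/* pdn can now be written into a DevX object creation request. */
	return (int)pdn;
}
#endif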
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};