/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <rte_windows.h>
#include <ethdev_pci.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* rte flow indexed pool configuration. */
static struct mlx5_indexed_pool_config icfg[] = {
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "ctl_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 1 << 14,
		.type = "rte_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "mcp_flow_ipool",
	},
};
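
/*
 * Illustrative sketch (not part of the driver): how an indexed pool built
 * from one of the configurations above hands out flow objects. Flows are
 * referenced by the 32-bit index rather than by pointer, so handles stay
 * compact and remain valid across trunk reallocation. Error handling is
 * elided.
 *
 *	struct mlx5_indexed_pool *pool = mlx5_ipool_create(&icfg[0]);
 *	uint32_t idx = 0;
 *	struct rte_flow *flow = mlx5_ipool_zmalloc(pool, &idx);
 *	... use the flow object, store idx in flow lists ...
 *	mlx5_ipool_free(pool, idx);
 *	mlx5_ipool_destroy(pool);
 */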

/* Allocate a DevX queue counter to back the imissed statistic. */
static void
mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	void *ctx = priv->sh->cdev->ctx;

	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
	if (!priv->q_counters) {
		DRV_LOG(ERR, "Port %d queue counter object cannot be created "
			"by DevX - imissed counter will be unavailable",
			dev->data->port_id);
		return;
	}
	priv->counter_set_id = priv->q_counters->id;
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		/* Allocate shared memory. */
		mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
					 sizeof(*mlx5_shared_data),
					 SOCKET_ID_ANY, 0);
		if (mz == NULL) {
			DRV_LOG(ERR, "Cannot allocate mlx5 shared data");
			ret = -rte_errno;
			goto error;
		}
		mlx5_shared_data = mz->addr;
		memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
		rte_spinlock_init(&mlx5_shared_data->lock);
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}
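
/*
 * Illustrative sketch (not driver code): a secondary process would attach
 * to the shared memzone by name instead of reserving it. Secondary
 * processes are rejected on Windows (see mlx5_os_net_probe()), so only the
 * reservation path above is implemented.
 *
 *	const struct rte_memzone *mz =
 *		rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
 *	if (mz != NULL)
 *		mlx5_shared_data = mz->addr;
 */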

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	if (mlx5_init_shared_data())
		return -rte_errno;
	return 0;
}

/**
 * Get mlx5 device capabilities.
 *
 * @param sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	struct mlx5_context *mlx5_ctx = sh->cdev->ctx;
	void *pv_iseg = NULL;
	u32 cb_iseg = 0;

	MLX5_ASSERT(sh->cdev->config.devx);
	MLX5_ASSERT(mlx5_dev_is_pci(sh->cdev->dev));
	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
	if (pv_iseg == NULL) {
		DRV_LOG(ERR, "Failed to get device hca_iseg.");
		rte_errno = errno;
		return -rte_errno;
	}
	memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
	sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(sh->cdev->dev));
	sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
	sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
	sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
	sh->dev_cap.dv_flow_en = 1;
	sh->dev_cap.mps = MLX5_MPW_DISABLED;
	DRV_LOG(DEBUG, "MPW isn't supported.");
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported.");
	sh->dev_cap.hw_csum = hca_attr->csum_cap;
	DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
		(sh->dev_cap.hw_csum ? "" : "not "));
	sh->dev_cap.hw_vlan_strip = hca_attr->vlan_cap;
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
		(sh->dev_cap.hw_vlan_strip ? "" : "not "));
	sh->dev_cap.hw_fcs_strip = hca_attr->scatter_fcs;
	sh->dev_cap.tso = ((1 << hca_attr->max_lso_cap) > 0);
	if (sh->dev_cap.tso)
		sh->dev_cap.tso_max_payload_sz = 1 << hca_attr->max_lso_cap;
	DRV_LOG(DEBUG, "Counters are not supported.");
	if (hca_attr->rss_ind_tbl_cap) {
		/*
		 * DPDK doesn't support larger/variable indirection tables.
		 * Once DPDK supports it, take max size from device attr.
		 */
		sh->dev_cap.ind_table_max_size =
			RTE_MIN(1 << hca_attr->rss_ind_tbl_cap,
				(unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
		DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
			sh->dev_cap.ind_table_max_size);
	}
	sh->dev_cap.swp = mlx5_get_supported_sw_parsing_offloads(hca_attr);
	sh->dev_cap.tunnel_en = mlx5_get_supported_tunneling_offloads(hca_attr);
	if (sh->dev_cap.tunnel_en) {
		DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
	} else {
		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
	}
	snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
	DRV_LOG(DEBUG, "Packet pacing is not supported.");
	mlx5_rt_timestamp_config(sh, hca_attr);
	return 0;
}
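
/*
 * Illustrative sketch (not driver code): capabilities gathered above reach
 * applications through the generic ethdev queries, e.g. the RSS indirection
 * table size and the checksum capability:
 *
 *	struct rte_eth_dev_info info;
 *	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
 *	    (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CHECKSUM))
 *		printf("reta %u, Rx checksum offload available\n",
 *		       info.reta_size);
 */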

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;

	if (!sh->flow_tbls)
		err = mlx5_alloc_table_hash_list(priv);
	else
		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
			(void *)sh->flow_tbls);
	return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	mlx5_free_table_hash_list(priv);
}

/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 * Currently it has no support under Windows.
 *
 * @param[in] rxq_obj
 *   Pointer to RQ channel object, which includes the channel fd.
 *
 * @param[in] fd
 *   The file descriptor (representing the interrupt) used in this channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	(void)fd;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}
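
/*
 * Illustrative sketch (not driver code): on POSIX systems the equivalent
 * operation would be a plain fcntl() on the channel fd (assuming <fcntl.h>
 * and <errno.h>):
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0)
 *		return -errno;
 *	return 0;
 */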

/**
 * Spawn an Ethernet device from DevX information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EEXIST: device is already spawned
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_kvargs_ctrl *mkvlist)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	int i;

	/* Build device name. */
	strlcpy(name, dpdk_dev->name, sizeof(name));
	/* Check if the device is already spawned. */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
	if (!sh)
		return NULL;
	if (!sh->config.dv_flow_en) {
		DRV_LOG(ERR, "Windows requires DV flow mode to be enabled.");
		err = ENOTSUP;
		goto error;
	}
	if (sh->config.vf_nl_en) {
		DRV_LOG(DEBUG, "VF netlink isn't supported.");
		sh->config.vf_nl_en = 0;
	}
	/* Initialize the shutdown event in mlx5_dev_spawn to
	 * support mlx5_is_removed for Windows.
	 */
	err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
	if (err) {
		DRV_LOG(ERR, "failed to init shutdown event: %s",
			strerror(errno));
		goto error;
	}
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	priv->mp_id.port_id = port_id;
	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
	/* representor_id field keeps the unmodified VF index. */
	priv->representor_id = -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Process parameters and store port configuration on priv structure. */
	err = mlx5_port_args_config(priv, mkvlist, &priv->config);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "Failed to process port configuration: %s",
			strerror(rte_errno));
		goto error;
	}
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "cannot allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
		MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;
			if (opriv &&
			    opriv->master &&
			    opriv->domain_id == priv->domain_id &&
			    opriv->sh == priv->sh) {
				eth_dev->data->backer_port_id = port_id;
				break;
			}
		}
		if (port_id >= RTE_MAX_ETHPORTS)
			eth_dev->data->backer_port_id = eth_dev->data->port_id;
	}
	/*
	 * Store associated network device interface index. This index
	 * is permanent throughout the lifetime of the device. So, we may
	 * store the ifindex here and use the cached value further.
	 */
	MLX5_ASSERT(spawn->ifindex);
	priv->if_index = spawn->ifindex;
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s).",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
		eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
#ifdef RTE_LIBRTE_MLX5_DEBUG
	{
		char ifname[MLX5_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown.",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
	eth_dev->dev_ops = &mlx5_dev_ops;
	eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
	eth_dev->rx_queue_count = mlx5_rx_queue_count;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	priv->ctrl_flows = 0;
	TAILQ_INIT(&priv->flow_meters);
	priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
	if (!priv->mtr_profile_tbl)
		goto error;
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
		eth_dev->data->port_id);
	/* nl calls are unsupported - set to -1 not to fail on release */
	priv->nl_socket_rdma = -1;
	priv->nl_socket_route = -1;
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		icfg[i].release_mem_en = !!sh->config.reclaim_mode;
		if (sh->config.reclaim_mode)
			icfg[i].per_core_cache = 0;
		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
		if (!priv->flows[i])
			goto error;
	}
	/* Create context for virtual machine VLAN workaround. */
	priv->vmwa_context = NULL;
	if (sh->config.dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	/* No supported flow priority number detection. */
	priv->sh->flow_max_priority = -1;
	mlx5_set_metadata_mask(eth_dev);
	if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
	    !priv->sh->dv_regc0_mask) {
		DRV_LOG(ERR, "metadata mode %u is not supported "
			"(no metadata reg_c[0] is available).",
			sh->config.dv_xmeta_en);
		err = ENOTSUP;
		goto error;
	}
	priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
				       mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
				       mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
				       mlx5_hrxq_clone_free_cb);
	/* Query availability of metadata reg_c's. */
	if (!priv->sh->metadata_regc_check_flag) {
		err = mlx5_flow_discover_mreg_c(eth_dev);
		if (err < 0) {
			err = -err;
			goto error;
		}
	}
	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
		DRV_LOG(DEBUG,
			"port %u extensive metadata register is not supported.",
			eth_dev->data->port_id);
		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
			DRV_LOG(ERR, "metadata mode %u is not supported "
				"(no metadata registers available).",
				sh->config.dv_xmeta_en);
			err = ENOTSUP;
			goto error;
		}
	}
	if (sh->cdev->config.devx) {
		priv->obj_ops = devx_obj_ops;
	} else {
		DRV_LOG(ERR, "Windows flow engine requires DevX.");
		err = ENOTSUP;
		goto error;
	}
	mlx5_flow_counter_mode_config(eth_dev);
	mlx5_queue_counter_id_prepare(eth_dev);
	return eth_dev;
error:
	if (priv) {
		if (priv->mtr_profile_tbl)
			mlx5_l3t_destroy(priv->mtr_profile_tbl);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		mlx5_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because it is part of
		 * dev_private.
		 */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_dev_ctx(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
	(void)sh;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	(void)sh;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Read statistics by a named counter.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 * @param[in] ctr_name
 *   Pointer to the name of the statistic counter to read.
 * @param[out] stat
 *   Pointer to read statistic value.
 * @return
 *   0 on success and stat is valid, non-zero if failed to read the value
 *   or counter is not supported.
 */
int
mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
		      uint64_t *stat)
{
	if (priv->q_counters != NULL && strcmp(ctr_name, "out_of_buffer") == 0)
		return mlx5_devx_cmd_queue_counter_query
				(priv->q_counters, 0, (uint32_t *)stat);
	DRV_LOG(WARNING, "%s: is not supported for the %s counter",
		__func__, ctr_name);
	return -ENOTSUP;
}
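
/*
 * Illustrative sketch (not driver code): the "out_of_buffer" queue counter
 * read above is what backs the standard imissed statistic (assuming
 * <inttypes.h> for PRIu64):
 *
 *	struct rte_eth_stats stats;
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("imissed: %" PRIu64 "\n", stats.imissed);
 */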

/**
 * Flush device MAC addresses.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Remove a MAC address from device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	(void)dev;
	(void)index;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Adds a MAC address to the device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	struct rte_ether_addr lmac;

	(void)index;
	if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			dev->data->port_id, strerror(rte_errno));
		return rte_errno;
	}
	if (!rte_is_same_ether_addr(&lmac, mac)) {
		DRV_LOG(ERR,
			"adding new mac address to device is unsupported");
		return -ENOTSUP;
	}
	return 0;
}

/**
 * Modify a VF MAC address.
 * Currently it has no support under Windows.
 *
 * @param priv
 *   Pointer to device private data.
 * @param mac_addr
 *   MAC address to modify into.
 * @param iface_idx
 *   Net device interface index.
 * @param vf_index
 *   VF index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	(void)priv;
	(void)iface_idx;
	(void)mac_addr;
	(void)vf_index;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Set device promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_glue->devx_set_promisc_vport(priv->sh->cdev->ctx, ALL_PROMISC, enable);
}

/**
 * Set device allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	return mlx5_glue->devx_set_promisc_vport(priv->sh->cdev->ctx, MC_PROMISC, enable);
}
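
/*
 * Illustrative sketch (not driver code): both vport-promiscuity callbacks
 * above are reached through the generic ethdev API:
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 */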

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given device.
 *
 * @param[in] cdev
 *   Pointer to the common device.
 * @param[in, out] mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_net_probe(struct mlx5_common_device *cdev,
		  struct mlx5_kvargs_ctrl *mkvlist)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
	struct mlx5_dev_spawn_data spawn = {
		.pf_bond = -1,
		.max_port = 1,
		.phys_port = 1,
		.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
		.pci_dev = pci_dev,
		.cdev = cdev,
		.ifindex = -1, /* Spawn will assign */
		.info = (struct mlx5_switch_info){
			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
		},
	};
	int ret;
	uint32_t restore;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		DRV_LOG(ERR, "Secondary process is not supported on Windows.");
		return -ENOTSUP;
	}
	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		return -rte_errno;
	}
	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, mkvlist);
	if (!spawn.eth_dev)
		return -rte_errno;
	restore = spawn.eth_dev->data->dev_flags;
	rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
	/* Restore non-PCI flags cleared by the above call. */
	spawn.eth_dev->data->dev_flags |= restore;
	rte_eth_dev_probing_finish(spawn.eth_dev);
	return 0;
}
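
/*
 * Illustrative sketch (not driver code): probing is driven by the EAL; an
 * application started with an allow-listed device such as
 *
 *	dpdk-testpmd -a 0000:03:00.0,dv_flow_en=1
 *
 * reaches mlx5_os_net_probe() through the common mlx5 PCI driver, with the
 * devargs delivered via mkvlist.
 */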

/**
 * Cleanup resources when the last device is closed.
 */
void
mlx5_os_net_cleanup(void)
{
}

const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};