1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
11 #include <rte_windows.h>
12 #include <ethdev_pci.h>
14 #include <mlx5_glue.h>
15 #include <mlx5_devx_cmds.h>
16 #include <mlx5_common.h>
17 #include <mlx5_common_mp.h>
18 #include <mlx5_common_mr.h>
19 #include <mlx5_malloc.h>
21 #include "mlx5_defs.h"
23 #include "mlx5_common_os.h"
24 #include "mlx5_utils.h"
25 #include "mlx5_rxtx.h"
28 #include "mlx5_autoconf.h"
29 #include "mlx5_flow.h"
30 #include "mlx5_devx.h"
32 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
34 /* Spinlock for mlx5_shared_data allocation. */
35 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
37 /* rte flow indexed pool configuration. */
38 static struct mlx5_indexed_pool_config icfg[] = {
40 .size = sizeof(struct rte_flow),
44 .malloc = mlx5_malloc,
47 .type = "ctl_flow_ipool",
50 .size = sizeof(struct rte_flow),
56 .malloc = mlx5_malloc,
58 .per_core_cache = 1 << 14,
59 .type = "rte_flow_ipool",
62 .size = sizeof(struct rte_flow),
68 .malloc = mlx5_malloc,
71 .type = "mcp_flow_ipool",
76 mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
78 struct mlx5_priv *priv = dev->data->dev_private;
79 void *ctx = priv->sh->cdev->ctx;
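/* Allocate a DevX queue counter; it backs the imissed (out of buffer) statistic. */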
81 priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
82 if (!priv->q_counters) {
83 DRV_LOG(ERR, "Port %d queue counter object cannot be created "
84 "by DevX - imissed counter will be unavailable",
88 priv->counter_set_id = priv->q_counters->id;
92 * Initialize shared data between primary and secondary process.
 * A memzone is reserved by the primary process and secondary processes attach to
98 * 0 on success, a negative errno value otherwise and rte_errno is set.
101 mlx5_init_shared_data(void)
103 const struct rte_memzone *mz;
106 rte_spinlock_lock(&mlx5_shared_data_lock);
107 if (mlx5_shared_data == NULL) {
108 /* Allocate shared memory. */
109 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
110 sizeof(*mlx5_shared_data),
114 "Cannot allocate mlx5 shared data");
118 mlx5_shared_data = mz->addr;
119 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
120 rte_spinlock_init(&mlx5_shared_data->lock);
123 rte_spinlock_unlock(&mlx5_shared_data_lock);
128 * PMD global initialization.
 * Independent of any individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
135 * 0 on success, a negative errno value otherwise and rte_errno is set.
140 if (mlx5_init_shared_data())
146 * Get mlx5 device attributes.
149 * Pointer to mlx5 device.
152 * Pointer to mlx5 device attributes.
 * 0 on success, a non-zero error number otherwise.
158 mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
159 struct mlx5_dev_attr *device_attr)
161 struct mlx5_context *mlx5_ctx;
162 void *pv_iseg = NULL;
166 if (!cdev || !cdev->ctx)
168 mlx5_ctx = (struct mlx5_context *)cdev->ctx;
169 memset(device_attr, 0, sizeof(*device_attr));
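/* The HCA attributes report most limits as log2 values; convert them to absolute sizes. */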
170 device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
171 device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
172 device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
173 device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
174 device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
175 device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
176 device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
177 device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
178 device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap;
179 if (cdev->config.hca_attr.rss_ind_tbl_cap) {
180 device_attr->max_rwq_indirection_table_size =
181 1 << cdev->config.hca_attr.rss_ind_tbl_cap;
183 device_attr->sw_parsing_offloads =
184 mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr);
185 device_attr->tunnel_offloads_caps =
186 mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr);
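/* Read the firmware version from the device initial segment. */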
187 pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
188 if (pv_iseg == NULL) {
189 DRV_LOG(ERR, "Failed to get device hca_iseg");
193 snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
194 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
195 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
196 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
202 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does the actual
 * resource creation/initialization only if the counter is zero.
207 * Pointer to the private device data structure.
210 * Zero on success, positive error code otherwise.
213 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
215 struct mlx5_dev_ctx_shared *sh = priv->sh;
219 err = mlx5_alloc_table_hash_list(priv);
221 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
222 (void *)sh->flow_tbls);
226 * Destroy DR related data within private structure.
229 * Pointer to the private device data structure.
232 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
234 mlx5_free_table_hash_list(priv);
238 * Set the completion channel file descriptor interrupt as non-blocking.
239 * Currently it has no support under Windows.
242 * Pointer to RQ channel object, which includes the channel fd
245 * The file descriptor (representing the interrupt) used in this channel.
248 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
251 mlx5_os_set_nonblock_channel_fd(int fd)
254 DRV_LOG(WARNING, "%s: is not supported", __func__);
259 * DV flow counter mode detect and config.
262 * Pointer to rte_eth_dev structure.
266 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
268 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
269 struct mlx5_priv *priv = dev->data->dev_private;
270 struct mlx5_dev_ctx_shared *sh = priv->sh;
273 #ifndef HAVE_IBV_DEVX_ASYNC
277 if (!sh->devx || !priv->config.dv_flow_en ||
278 !priv->config.hca_attr.flow_counters_dump ||
279 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
280 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
284 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
285 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
286 priv->config.hca_attr.flow_counters_dump,
287 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
/* Initialize fallback mode only on the port that initializes sh. */
290 sh->cmng.counter_fallback = fallback;
291 else if (fallback != sh->cmng.counter_fallback)
292 DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
293 "with others:%d.", PORT_ID(priv), fallback);
298 * Spawn an Ethernet device from DevX information.
301 * Backing DPDK device.
303 * Verbs device parameters (name, port, switch_info) to spawn.
305 * Device configuration parameters.
308 * A valid Ethernet device object on success, NULL otherwise and rte_errno
309 * is set. The following errors are defined:
311 * EEXIST: device is already spawned
313 static struct rte_eth_dev *
314 mlx5_dev_spawn(struct rte_device *dpdk_dev,
315 struct mlx5_dev_spawn_data *spawn,
316 struct mlx5_dev_config *config)
318 const struct mlx5_switch_info *switch_info = &spawn->info;
319 struct mlx5_dev_ctx_shared *sh = NULL;
320 struct mlx5_dev_attr device_attr;
321 struct rte_eth_dev *eth_dev = NULL;
322 struct mlx5_priv *priv = NULL;
324 unsigned int cqe_comp;
325 struct rte_ether_addr mac;
326 char name[RTE_ETH_NAME_MAX_LEN];
327 int own_domain_id = 0;
331 /* Build device name. */
332 strlcpy(name, dpdk_dev->name, sizeof(name));
/* Check if the device is already spawned. */
334 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
338 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
340 * Some parameters are needed in advance to create device context. We
 * process the devargs here to get them, and later process devargs
342 * again to override some hardware settings.
344 err = mlx5_args(config, dpdk_dev->devargs);
347 DRV_LOG(ERR, "failed to process device arguments: %s",
348 strerror(rte_errno));
351 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
354 /* Initialize the shutdown event in mlx5_dev_spawn to
355 * support mlx5_is_removed for Windows.
357 err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
359 DRV_LOG(ERR, "failed to init showdown event: %s",
363 DRV_LOG(DEBUG, "MPW isn't supported");
364 mlx5_os_get_dev_attr(sh->cdev, &device_attr);
365 config->swp = device_attr.sw_parsing_offloads &
366 (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
367 MLX5_SW_PARSING_TSO_CAP);
368 config->ind_table_max_size =
369 sh->device_attr.max_rwq_indirection_table_size;
371 config->cqe_comp = cqe_comp;
372 config->tunnel_en = device_attr.tunnel_offloads_caps &
373 (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
374 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
375 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
376 if (config->tunnel_en) {
377 DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
379 MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
381 MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
383 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
386 DRV_LOG(DEBUG, "tunnel offloading is not supported");
388 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported");
390 /* Allocate private eth device data. */
391 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
393 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
395 DRV_LOG(ERR, "priv allocation failure");
400 priv->dev_port = spawn->phys_port;
401 priv->pci_dev = spawn->pci_dev;
402 priv->mtu = RTE_ETHER_MTU;
403 priv->mp_id.port_id = port_id;
404 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
405 priv->representor = !!switch_info->representor;
406 priv->master = !!switch_info->master;
407 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
408 priv->vport_meta_tag = 0;
409 priv->vport_meta_mask = 0;
410 priv->pf_bond = spawn->pf_bond;
412 /* representor_id field keeps the unmodified VF index. */
413 priv->representor_id = -1;
415 * Look for sibling devices in order to reuse their switch domain
416 * if any, otherwise allocate one.
418 MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
419 const struct mlx5_priv *opriv =
420 rte_eth_devices[port_id].data->dev_private;
423 opriv->sh != priv->sh ||
425 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
427 priv->domain_id = opriv->domain_id;
430 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
431 err = rte_eth_switch_domain_alloc(&priv->domain_id);
434 DRV_LOG(ERR, "unable to allocate switch domain: %s",
435 strerror(rte_errno));
440 /* Override some values set by hardware configuration. */
441 mlx5_args(config, dpdk_dev->devargs);
442 err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
445 DRV_LOG(DEBUG, "counters are not supported");
446 config->ind_table_max_size =
447 sh->device_attr.max_rwq_indirection_table_size;
449 * Remove this check once DPDK supports larger/variable
450 * indirection tables.
452 if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
453 config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
454 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
455 config->ind_table_max_size);
456 if (config->hw_padding) {
457 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
458 config->hw_padding = 0;
460 config->tso = (sh->device_attr.max_tso > 0);
462 config->tso_max_payload_sz = sh->device_attr.max_tso;
463 DRV_LOG(DEBUG, "%sMPS is %s.",
464 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
465 config->mps == MLX5_MPW ? "legacy " : "",
466 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
467 if (config->cqe_comp && !cqe_comp) {
468 DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
469 config->cqe_comp = 0;
472 config->hca_attr = sh->cdev->config.hca_attr;
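/* Derive offload capabilities from the HCA attributes queried through DevX. */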
473 config->hw_csum = config->hca_attr.csum_cap;
474 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
475 (config->hw_csum ? "" : "not "));
476 config->hw_vlan_strip = config->hca_attr.vlan_cap;
477 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
478 (config->hw_vlan_strip ? "" : "not "));
479 config->hw_fcs_strip = config->hca_attr.scatter_fcs;
482 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
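/* Detect real-time timestamp support by reading the MTUTC register (needs user register access). */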
484 err = config->hca_attr.access_register_user ?
485 mlx5_devx_cmd_register_read
486 (sh->cdev->ctx, MLX5_REGISTER_ID_MTUTC, 0,
487 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
491 /* MTUTC register is read successfully. */
492 ts_mode = MLX5_GET(register_mtutc, reg,
494 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
495 config->rt_timestamp = 1;
497 /* Kernel does not support register reading. */
498 if (config->hca_attr.dev_freq_khz ==
499 (NS_PER_S / MS_PER_S))
500 config->rt_timestamp = 1;
503 if (config->mprq.enabled) {
504 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
505 config->mprq.enabled = 0;
507 if (config->max_dump_files_num == 0)
508 config->max_dump_files_num = 128;
509 eth_dev = rte_eth_dev_allocate(name);
510 if (eth_dev == NULL) {
511 DRV_LOG(ERR, "can not allocate rte ethdev");
515 if (priv->representor) {
516 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
517 eth_dev->data->representor_id = priv->representor_id;
518 MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
519 struct mlx5_priv *opriv =
520 rte_eth_devices[port_id].data->dev_private;
523 opriv->domain_id == priv->domain_id &&
524 opriv->sh == priv->sh) {
525 eth_dev->data->backer_port_id = port_id;
529 if (port_id >= RTE_MAX_ETHPORTS)
530 eth_dev->data->backer_port_id = eth_dev->data->port_id;
533 * Store associated network device interface index. This index
 * is permanent throughout the lifetime of the device. So, we may store
535 * the ifindex here and use the cached value further.
537 MLX5_ASSERT(spawn->ifindex);
538 priv->if_index = spawn->ifindex;
539 eth_dev->data->dev_private = priv;
540 priv->dev_data = eth_dev->data;
541 eth_dev->data->mac_addrs = priv->mac;
542 eth_dev->device = dpdk_dev;
543 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
544 /* Configure the first MAC address by default. */
545 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
547 "port %u cannot get MAC address, is mlx5_en"
548 " loaded? (errno: %s).",
549 eth_dev->data->port_id, strerror(rte_errno));
554 "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
555 eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
556 #ifdef RTE_LIBRTE_MLX5_DEBUG
558 char ifname[MLX5_NAMESIZE];
560 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
561 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
562 eth_dev->data->port_id, ifname);
564 DRV_LOG(DEBUG, "port %u ifname is unknown.",
565 eth_dev->data->port_id);
568 /* Get actual MTU if possible. */
569 err = mlx5_get_mtu(eth_dev, &priv->mtu);
574 DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
576 /* Initialize burst functions to prevent crashes before link-up. */
577 eth_dev->rx_pkt_burst = removed_rx_burst;
578 eth_dev->tx_pkt_burst = removed_tx_burst;
579 eth_dev->dev_ops = &mlx5_dev_ops;
580 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
581 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
582 eth_dev->rx_queue_count = mlx5_rx_queue_count;
583 /* Register MAC address. */
584 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
585 priv->ctrl_flows = 0;
586 TAILQ_INIT(&priv->flow_meters);
587 priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
588 if (!priv->mtr_profile_tbl)
590 /* Bring Ethernet device up. */
591 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
592 eth_dev->data->port_id);
/* Netlink calls are unsupported - set to -1 so that release does not fail. */
594 priv->nl_socket_rdma = -1;
595 priv->nl_socket_route = -1;
596 mlx5_set_link_up(eth_dev);
598 * Even though the interrupt handler is not installed yet,
599 * interrupts will still trigger on the async_fd from
600 * Verbs context returned by ibv_open_device().
602 mlx5_link_update(eth_dev, 0);
603 config->dv_esw_en = 0;
604 /* Detect minimal data bytes to inline. */
605 mlx5_set_min_inline(spawn, config);
606 /* Store device configuration on private structure. */
607 priv->config = *config;
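/* Create an indexed pool per flow type; reclaim mode disables the per-core cache. */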
608 for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
609 icfg[i].release_mem_en = !!config->reclaim_mode;
610 if (config->reclaim_mode)
611 icfg[i].per_core_cache = 0;
612 priv->flows[i] = mlx5_ipool_create(&icfg[i]);
616 /* Create context for virtual machine VLAN workaround. */
617 priv->vmwa_context = NULL;
618 if (config->dv_flow_en) {
619 err = mlx5_alloc_shared_dr(priv);
/* Flow priority number detection is not supported. */
624 priv->sh->flow_max_priority = -1;
625 if (!priv->config.dv_esw_en &&
626 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
627 DRV_LOG(WARNING, "metadata mode %u is not supported "
628 "(no E-Switch)", priv->config.dv_xmeta_en);
629 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
631 mlx5_set_metadata_mask(eth_dev);
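/* Extended metadata modes require the reg_c[0] metadata register to be usable. */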
632 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
633 !priv->sh->dv_regc0_mask) {
634 DRV_LOG(ERR, "metadata mode %u is not supported "
635 "(no metadata reg_c[0] is available).",
636 priv->config.dv_xmeta_en);
640 priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
641 mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
642 mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
643 mlx5_hrxq_clone_free_cb);
644 /* Query availability of metadata reg_c's. */
645 if (!priv->sh->metadata_regc_check_flag) {
646 err = mlx5_flow_discover_mreg_c(eth_dev);
652 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
654 "port %u extensive metadata register is not supported.",
655 eth_dev->data->port_id);
656 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
657 DRV_LOG(ERR, "metadata mode %u is not supported "
658 "(no metadata registers available).",
659 priv->config.dv_xmeta_en);
664 if (sh->devx && config->dv_flow_en) {
665 priv->obj_ops = devx_obj_ops;
667 DRV_LOG(ERR, "Flow mode %u is not supported "
668 "(Windows flow must be DevX with DV flow enabled).",
669 priv->config.dv_flow_en);
673 mlx5_flow_counter_mode_config(eth_dev);
674 mlx5_queue_counter_id_prepare(eth_dev);
678 if (priv->mtr_profile_tbl)
679 mlx5_l3t_destroy(priv->mtr_profile_tbl);
681 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
684 eth_dev->data->dev_private = NULL;
686 if (eth_dev != NULL) {
687 /* mac_addrs must not be freed alone because part of
690 eth_dev->data->mac_addrs = NULL;
691 rte_eth_dev_release_port(eth_dev);
694 mlx5_free_shared_dev_ctx(sh);
695 MLX5_ASSERT(err > 0);
 * This function should share events between multiple ports of a single IB
702 * device. Currently it has no support under Windows.
705 * Pointer to mlx5_dev_ctx_shared object.
708 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
711 DRV_LOG(WARNING, "%s: is not supported", __func__);
 * This function should share events between multiple ports of a single IB
716 * device. Currently it has no support under Windows.
719 * Pointer to mlx5_dev_ctx_shared object.
722 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
725 DRV_LOG(WARNING, "%s: is not supported", __func__);
729 * Read statistics by a named counter.
732 * Pointer to the private device data structure.
733 * @param[in] ctr_name
 * Pointer to the name of the statistic counter to read.
736 * Pointer to read statistic value.
738 * 0 on success and stat is valid, non-zero if failed to read the value
739 * or counter is not supported.
744 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
747 if (priv->q_counters != NULL && strcmp(ctr_name, "out_of_buffer") == 0)
748 return mlx5_devx_cmd_queue_counter_query
749 (priv->q_counters, 0, (uint32_t *)stat);
750 DRV_LOG(WARNING, "%s: is not supported for the %s counter",
756 * Flush device MAC addresses
757 * Currently it has no support under Windows.
760 * Pointer to Ethernet device structure.
764 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
767 DRV_LOG(WARNING, "%s: is not supported", __func__);
771 * Remove a MAC address from device
772 * Currently it has no support under Windows.
775 * Pointer to Ethernet device structure.
780 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
784 DRV_LOG(WARNING, "%s: is not supported", __func__);
 * Add a MAC address to the device
789 * Currently it has no support under Windows.
792 * Pointer to Ethernet device structure.
794 * MAC address to register.
799 * 0 on success, a negative errno value otherwise
802 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
806 struct rte_ether_addr lmac;
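/* Only the device's own primary MAC address can be registered on Windows. */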
808 if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
810 "port %u cannot get MAC address, is mlx5_en"
811 " loaded? (errno: %s)",
812 dev->data->port_id, strerror(rte_errno));
815 if (!rte_is_same_ether_addr(&lmac, mac)) {
817 "adding new mac address to device is unsupported");
824 * Modify a VF MAC address
825 * Currently it has no support under Windows.
828 * Pointer to device private data.
830 * MAC address to modify into.
832 * Net device interface index
837 * 0 on success, a negative errno value otherwise
840 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
841 unsigned int iface_idx,
842 struct rte_ether_addr *mac_addr,
849 DRV_LOG(WARNING, "%s: is not supported", __func__);
854 * Set device promiscuous mode
855 * Currently it has no support under Windows.
858 * Pointer to Ethernet device structure.
860 * 0 - promiscuous is disabled, otherwise - enabled
863 * 0 on success, a negative error value otherwise
866 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
870 DRV_LOG(WARNING, "%s: is not supported", __func__);
875 * Set device allmulti mode
878 * Pointer to Ethernet device structure.
 * 0 - all multicast is disabled, otherwise - enabled
883 * 0 on success, a negative error value otherwise
886 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
890 DRV_LOG(WARNING, "%s: is not supported", __func__);
895 * DPDK callback to register a PCI device.
897 * This function spawns Ethernet devices out of a given device.
900 * Pointer to the common device.
903 * 0 on success, a negative errno value otherwise and rte_errno is set.
906 mlx5_os_net_probe(struct mlx5_common_device *cdev)
908 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
909 struct mlx5_dev_spawn_data spawn = {
913 .phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
916 .ifindex = -1, /* Spawn will assign */
917 .info = (struct mlx5_switch_info){
918 .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
921 struct mlx5_dev_config dev_config = {
923 .txq_inline_max = MLX5_ARG_UNSET,
924 .txq_inline_min = MLX5_ARG_UNSET,
925 .txq_inline_mpw = MLX5_ARG_UNSET,
926 .txqs_inline = MLX5_ARG_UNSET,
928 .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
929 .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
932 .log_hp_size = MLX5_ARG_UNSET,
937 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
938 DRV_LOG(ERR, "Secondary process is not supported on Windows.");
941 ret = mlx5_init_once();
943 DRV_LOG(ERR, "unable to init PMD global data: %s",
944 strerror(rte_errno));
947 /* Device specific configuration. */
948 switch (pci_dev->id.device_id) {
949 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
950 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
951 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
952 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
953 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
954 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
955 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
962 spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
965 restore = spawn.eth_dev->data->dev_flags;
966 rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
967 /* Restore non-PCI flags cleared by the above call. */
968 spawn.eth_dev->data->dev_flags |= restore;
969 rte_eth_dev_probing_finish(spawn.eth_dev);
974 * Cleanup resources when the last device is closed.
977 mlx5_os_net_cleanup(void)
981 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};