/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_windows.h>
#include <ethdev_pci.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* rte flow indexed pool configuration. */
static struct mlx5_indexed_pool_config icfg[] = {
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "ctl_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 1 << 14,
		.type = "rte_flow_ipool",
	},
	{
		.size = sizeof(struct rte_flow),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mcp_flow_ipool",
	},
};

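/*
 * Illustrative sketch (not part of the driver): flows allocated from these
 * indexed pools are referenced by a compact uint32_t index rather than a raw
 * pointer, along the lines of:
 *
 *	uint32_t idx;
 *	struct rte_flow *flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
 *	...
 *	mlx5_ipool_free(priv->flows[type], idx);
 *
 * The per_core_cache value of the rte_flow pool (16K entries) trades memory
 * for lockless allocation on the fast path.
 */
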
/**
 * Initialize shared data between primary and secondary processes.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		/* Allocate shared memory. */
		mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
					 sizeof(*mlx5_shared_data),
					 SOCKET_ID_ANY, 0);
		if (mz == NULL) {
			DRV_LOG(ERR,
				"Cannot allocate mlx5 shared data");
			ret = -rte_errno;
			goto error;
		}
		mlx5_shared_data = mz->addr;
		memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
		rte_spinlock_init(&mlx5_shared_data->lock);
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

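/*
 * Illustrative sketch (assumption, not upstream code): a secondary process
 * would attach to the same shared data by looking the memzone up by name:
 *
 *	const struct rte_memzone *mz =
 *		rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
 *
 *	if (mz == NULL)
 *		return -rte_errno;
 *	mlx5_shared_data = mz->addr;
 *
 * On Windows only the primary process is supported (see mlx5_os_net_probe()
 * below), so the reserve path above is the only one exercised.
 */
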
/**
 * PMD global initialization.
 *
 * Independent of individual devices, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	rte_spinlock_lock(&sd->lock);
	if (!sd->init_done) {
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		sd->init_done = true;
	}
	rte_spinlock_unlock(&sd->lock);
	return 0;
}

/**
 * Get mlx5 device attributes.
 *
 * @param ctx
 *   Pointer to device context.
 *
 * @param device_attr
 *   Pointer to mlx5 device attributes.
 *
 * @return
 *   0 on success, a non-zero error number otherwise.
 */
int
mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
{
	struct mlx5_context *mlx5_ctx;
	struct mlx5_hca_attr hca_attr;
	void *pv_iseg = NULL;
	u32 cb_iseg = 0;
	int err = 0;

	if (!ctx)
		return -EINVAL;
	mlx5_ctx = (struct mlx5_context *)ctx;
	memset(device_attr, 0, sizeof(*device_attr));
	err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
	if (err) {
		DRV_LOG(ERR, "Failed to get device hca_cap");
		return err;
	}
	device_attr->max_cq = 1 << hca_attr.log_max_cq;
	device_attr->max_qp = 1 << hca_attr.log_max_qp;
	device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
	device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
	device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
	device_attr->max_pd = 1 << hca_attr.log_max_pd;
	device_attr->max_srq = 1 << hca_attr.log_max_srq;
	device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
	device_attr->max_tso = 1 << hca_attr.max_lso_cap;
	if (hca_attr.rss_ind_tbl_cap) {
		device_attr->max_rwq_indirection_table_size =
			1 << hca_attr.rss_ind_tbl_cap;
	}
	device_attr->sw_parsing_offloads =
		mlx5_get_supported_sw_parsing_offloads(&hca_attr);
	device_attr->tunnel_offloads_caps =
		mlx5_get_supported_tunneling_offloads(&hca_attr);
	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
	if (pv_iseg == NULL) {
		DRV_LOG(ERR, "Failed to get device hca_iseg");
		return errno;
	}
	snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
	return err;
}

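/*
 * Illustrative caller sketch (not upstream code): the HCA capabilities are
 * log2-encoded, so the decoded limits above are powers of two:
 *
 *	struct mlx5_dev_attr attr;
 *
 *	if (mlx5_os_get_dev_attr(ctx, &attr) == 0)
 *		DRV_LOG(INFO, "fw %s, max %d CQs, max %d QPs",
 *			attr.fw_ver, attr.max_cq, attr.max_qp);
 */
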
/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;

	if (!sh->flow_tbls)
		err = mlx5_alloc_table_hash_list(priv);
	else
		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
			(void *)sh->flow_tbls);
	return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	mlx5_free_table_hash_list(priv);
}

/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 * Currently it has no support under Windows.
 *
 * @param[in] rxq_obj
 *   Pointer to RQ channel object, which includes the channel fd.
 *
 * @param[out] fd
 *   The file descriptor (representing the interrupt) used in this channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	RTE_SET_USED(fd);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

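/*
 * For reference only (assumption based on the Linux implementation of this
 * callback): on Linux the channel fd is switched to non-blocking roughly as:
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	int ret = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *
 * No equivalent is available for DevX completion channels on Windows yet.
 */
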
/**
 * DV flow counter mode detect and config.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 */
static void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!priv->config.devx || !priv->config.dv_flow_en ||
	    !priv->config.hca_attr.flow_counters_dump ||
	    !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			priv->config.hca_attr.flow_counters_dump,
			priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"than others: %d.", PORT_ID(priv), fallback);
#endif
}

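/*
 * Illustrative reading of the bulk-allocation check above (assumption about
 * the bit semantics): each set bit k in flow_counter_bulk_alloc_bitmap
 * advertises support for bulks of 2^k counters, so the 0x4 test requires
 * bulks of at least 4:
 *
 *	bool bulk_of_4_ok =
 *		!!(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4);
 */
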
/**
 * Spawn an Ethernet device from DevX information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EEXIST: device is already spawned
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config *config)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct mlx5_dev_attr device_attr;
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int cqe_comp;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	int i;

	/* Build device name. */
	strlcpy(name, dpdk_dev->name, sizeof(name));
	/* Check if the device is already spawned. */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	/*
	 * Some parameters are needed in advance to create the device context.
	 * We process the devargs here to obtain them, and later process the
	 * devargs again to override some hardware settings.
	 */
	err = mlx5_args(config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
	if (!sh)
		return NULL;
	config->devx = sh->devx;
	/* Initialize the shutdown event in mlx5_dev_spawn to
	 * support mlx5_is_removed for Windows.
	 */
	err = mlx5_glue->devx_init_showdown_event(sh->ctx);
	if (err) {
		DRV_LOG(ERR, "failed to init shutdown event: %s",
			strerror(errno));
		goto error;
	}
	DRV_LOG(DEBUG, "MPW isn't supported");
	mlx5_os_get_dev_attr(sh->ctx, &device_attr);
	config->swp = device_attr.sw_parsing_offloads &
		(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
		 MLX5_SW_PARSING_TSO_CAP);
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
	cqe_comp = 0;
	config->cqe_comp = cqe_comp;
	config->tunnel_en = device_attr.tunnel_offloads_caps &
		(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
		 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
		 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
	if (config->tunnel_en) {
		DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
			config->tunnel_en &
			MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
			config->tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
			config->tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
		);
	} else {
		DRV_LOG(DEBUG, "tunnel offloading is not supported");
	}
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported");
	config->mpls_en = 0;
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	priv->mp_id.port_id = port_id;
	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
	/* representor_id field keeps the unmodified VF index. */
	priv->representor_id = -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Override some values set by hardware configuration. */
	mlx5_args(config, dpdk_dev->devargs);
	err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev);
	if (err)
		goto error;
	DRV_LOG(DEBUG, "counters are not supported");
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config->ind_table_max_size);
	if (config->hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
		config->hw_padding = 0;
	}
	config->tso = (sh->device_attr.max_tso > 0);
	if (config->tso)
		config->tso_max_payload_sz = sh->device_attr.max_tso;
	DRV_LOG(DEBUG, "%sMPS is %s.",
		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
		config->mps == MLX5_MPW ? "legacy " : "",
		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config->cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
		config->cqe_comp = 0;
	}
	if (config->devx) {
		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
		if (err) {
			err = -err;
			goto error;
		}
		/* Check relaxed ordering support. */
		sh->cmng.relaxed_ordering_read = 0;
		sh->cmng.relaxed_ordering_write = 0;
		if (!haswell_broadwell_cpu) {
			sh->cmng.relaxed_ordering_write =
				config->hca_attr.relaxed_ordering_write;
			sh->cmng.relaxed_ordering_read =
				config->hca_attr.relaxed_ordering_read;
		}
		config->hw_csum = config->hca_attr.csum_cap;
		DRV_LOG(DEBUG, "checksum offloading is %ssupported",
			(config->hw_csum ? "" : "not "));
		config->hw_vlan_strip = config->hca_attr.vlan_cap;
		DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
			(config->hw_vlan_strip ? "" : "not "));
		config->hw_fcs_strip = config->hca_attr.scatter_fcs;
	}
	if (config->devx) {
		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];

		err = config->hca_attr.access_register_user ?
			mlx5_devx_cmd_register_read
				(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
				 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
		if (!err) {
			uint32_t ts_mode;

			/* MTUTC register is read successfully. */
			ts_mode = MLX5_GET(register_mtutc, reg,
					   time_stamp_mode);
			if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
				config->rt_timestamp = 1;
		} else {
			/* Kernel does not support register reading. */
			if (config->hca_attr.dev_freq_khz ==
			    (NS_PER_S / MS_PER_S))
				config->rt_timestamp = 1;
		}
		sh->rq_ts_format = config->hca_attr.rq_ts_format;
		sh->sq_ts_format = config->hca_attr.sq_ts_format;
		sh->qp_ts_format = config->hca_attr.qp_ts_format;
	}
	if (config->mprq.enabled) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config->mprq.enabled = 0;
	}
	if (config->max_dump_files_num == 0)
		config->max_dump_files_num = 128;
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "cannot allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
		MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;
			if (opriv &&
			    opriv->master &&
			    opriv->domain_id == priv->domain_id &&
			    opriv->sh == priv->sh) {
				eth_dev->data->backer_port_id = port_id;
				break;
			}
		}
		if (port_id >= RTE_MAX_ETHPORTS)
			eth_dev->data->backer_port_id = eth_dev->data->port_id;
	}
	/*
	 * Store the associated network device interface index. This index
	 * is permanent throughout the lifetime of the device, so we may
	 * store the ifindex here and use the cached value later.
	 */
	MLX5_ASSERT(spawn->ifindex);
	priv->if_index = spawn->ifindex;
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s).",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
		eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
#ifdef RTE_LIBRTE_MLX5_DEBUG
	{
		char ifname[MLX5_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown.",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
	eth_dev->rx_queue_count = mlx5_rx_queue_count;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	priv->ctrl_flows = 0;
	TAILQ_INIT(&priv->flow_meters);
	priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
	if (!priv->mtr_profile_tbl)
		goto error;
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
		eth_dev->data->port_id);
	/* nl calls are unsupported - set to -1 not to fail on release. */
	priv->nl_socket_rdma = -1;
	priv->nl_socket_route = -1;
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	config->dv_esw_en = 0;
	/* Detect minimal data bytes to inline. */
	mlx5_set_min_inline(spawn, config);
	/* Store device configuration on private structure. */
	priv->config = *config;
	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
		icfg[i].release_mem_en = !!config->reclaim_mode;
		if (config->reclaim_mode)
			icfg[i].per_core_cache = 0;
		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
		if (!priv->flows[i])
			goto error;
	}
	/* Create context for virtual machine VLAN workaround. */
	priv->vmwa_context = NULL;
	if (config->dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	/* Flow priority number detection is not supported. */
	priv->config.flow_prio = -1;
	if (!priv->config.dv_esw_en &&
	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		DRV_LOG(WARNING, "metadata mode %u is not supported "
			"(no E-Switch)", priv->config.dv_xmeta_en);
		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
	}
	mlx5_set_metadata_mask(eth_dev);
	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
	    !priv->sh->dv_regc0_mask) {
		DRV_LOG(ERR, "metadata mode %u is not supported "
			"(no metadata reg_c[0] is available).",
			priv->config.dv_xmeta_en);
		err = ENOTSUP;
		goto error;
	}
	priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
				       mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
				       mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
				       mlx5_hrxq_clone_free_cb);
	/* Query availability of metadata reg_c's. */
	err = mlx5_flow_discover_mreg_c(eth_dev);
	if (err < 0) {
		err = -err;
		goto error;
	}
	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
		DRV_LOG(DEBUG,
			"port %u extensive metadata register is not supported.",
			eth_dev->data->port_id);
		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
			DRV_LOG(ERR, "metadata mode %u is not supported "
				"(no metadata registers available).",
				priv->config.dv_xmeta_en);
			err = ENOTSUP;
			goto error;
		}
	}
	if (config->devx && config->dv_flow_en) {
		priv->obj_ops = devx_obj_ops;
	} else {
		DRV_LOG(ERR, "Flow mode %u is not supported "
			"(Windows flow must be DevX with DV flow enabled).",
			priv->config.dv_flow_en);
		err = ENOTSUP;
		goto error;
	}
	mlx5_flow_counter_mode_config(eth_dev);
	return eth_dev;
error:
	if (priv) {
		if (priv->mtr_profile_tbl)
			mlx5_l3t_destroy(priv->mtr_profile_tbl);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		mlx5_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because it is part of
		 * dev_private.
		 */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_dev_ctx(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

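/*
 * Illustrative caller sketch (it mirrors mlx5_os_net_probe() below): on
 * failure mlx5_dev_spawn() returns NULL with rte_errno set, so callers
 * propagate it as a negative errno:
 *
 *	eth_dev = mlx5_dev_spawn(dev, &spawn, &config);
 *	if (eth_dev == NULL)
 *		return -rte_errno;
 */
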
/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
	RTE_SET_USED(sh);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	RTE_SET_USED(sh);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Read statistics by a named counter.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 * @param[in] ctr_name
 *   Pointer to the name of the statistic counter to read.
 * @param[out] stat
 *   Pointer to read statistic value.
 * @return
 *   0 on success and stat is valid, non-zero otherwise and rte_errno is set.
 */
int
mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
		      uint64_t *stat)
{
	RTE_SET_USED(priv);
	RTE_SET_USED(ctr_name);
	RTE_SET_USED(stat);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Flush device MAC addresses.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 */
void
mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Remove a MAC address from device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(index);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Add a MAC address to the device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	struct rte_ether_addr lmac;

	RTE_SET_USED(index);
	if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			dev->data->port_id, strerror(rte_errno));
		return rte_errno;
	}
	if (!rte_is_same_ether_addr(&lmac, mac)) {
		DRV_LOG(ERR,
			"adding new mac address to device is unsupported");
		return -ENOTSUP;
	}
	return 0;
}

/**
 * Modify a VF MAC address.
 * Currently it has no support under Windows.
 *
 * @param priv
 *   Pointer to device private data.
 * @param iface_idx
 *   Net device interface index.
 * @param mac_addr
 *   MAC address to modify into.
 * @param vf_index
 *   VF index.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	RTE_SET_USED(priv);
	RTE_SET_USED(iface_idx);
	RTE_SET_USED(mac_addr);
	RTE_SET_USED(vf_index);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Set device promiscuous mode.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(enable);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Set device allmulti mode.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled.
 *
 * @return
 *   0 on success, a negative error value otherwise.
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(enable);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given device.
 *
 * @param[in] cdev
 *   Pointer to the common device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_net_probe(struct mlx5_common_device *cdev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
	struct mlx5_dev_spawn_data spawn = {
		.pf_bond = -1,
		.max_port = 1,
		.phys_port = 1,
		.pci_dev = pci_dev,
		.ifindex = -1, /* Spawn will assign */
		.info = (struct mlx5_switch_info){
			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
		},
	};
	struct mlx5_dev_config dev_config = {
		.txq_inline_max = MLX5_ARG_UNSET,
		.txq_inline_min = MLX5_ARG_UNSET,
		.txq_inline_mpw = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.mprq = {
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
		.dv_flow_en = 1,
		.log_hp_size = MLX5_ARG_UNSET,
	};
	void *ctx;
	int ret;
	uint32_t restore;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		DRV_LOG(ERR, "Secondary process is not supported on Windows.");
		return -ENOTSUP;
	}
	ret = mlx5_os_open_device(cdev, &ctx);
	if (ret) {
		DRV_LOG(ERR, "Failed to open DevX device %s", cdev->dev->name);
		return -rte_errno;
	}
	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	spawn.phys_dev_name = mlx5_os_get_ctx_device_name(ctx);
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
		dev_config.vf = 1;
		break;
	default:
		dev_config.vf = 0;
		break;
	}
	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
	if (!spawn.eth_dev) {
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	restore = spawn.eth_dev->data->dev_flags;
	rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
	/* Restore non-PCI flags cleared by the above call. */
	spawn.eth_dev->data->dev_flags |= restore;
	rte_eth_dev_probing_finish(spawn.eth_dev);
	return 0;
}

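/*
 * Illustrative sketch (assumption; the actual registration lives in the
 * common mlx5 code, not in this file): this probe callback is reached
 * through a class driver structure along the lines of:
 *
 *	static struct mlx5_class_driver mlx5_net_driver = {
 *		.drv_class = MLX5_CLASS_ETH,
 *		.name = "mlx5_eth",
 *		.probe = mlx5_os_net_probe,
 *		...
 *	};
 */
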
/**
 * Set the reg_mr and dereg_mr callbacks.
 *
 * @param[out] reg_mr_cb
 *   Pointer to reg_mr func.
 * @param[out] dereg_mr_cb
 *   Pointer to dereg_mr func.
 *
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
		      mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = mlx5_os_reg_mr;
	*dereg_mr_cb = mlx5_os_dereg_mr;
}

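/*
 * Illustrative use sketch (assumption about the mlx5_reg_mr_t prototype):
 * the common MR layer fetches the OS-specific callbacks once and then calls
 * through the pointers:
 *
 *	mlx5_reg_mr_t reg_mr;
 *	mlx5_dereg_mr_t dereg_mr;
 *
 *	mlx5_os_set_reg_mr_cb(&reg_mr, &dereg_mr);
 *	ret = reg_mr(pd, addr, length, &pmd_mr);
 */
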
/**
 * Extract pdn of PD object using DevX.
 *
 * @param[in] pd
 *   Pointer to the DevX PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
int
mlx5_os_get_pdn(void *pd, uint32_t *pdn)
{
	if (!pd)
		return -EINVAL;

	*pdn = ((struct mlx5_pd *)pd)->pdn;
	return 0;
}

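/*
 * Illustrative use sketch (not upstream code): the extracted PD number is
 * what DevX object creation attributes expect, e.g.:
 *
 *	uint32_t pdn = 0;
 *
 *	if (mlx5_os_get_pdn(priv->sh->pd, &pdn) == 0)
 *		wq_attr.pd = pdn;
 */
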
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};