1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
11 #include <rte_windows.h>
12 #include <ethdev_pci.h>
14 #include <mlx5_glue.h>
15 #include <mlx5_devx_cmds.h>
16 #include <mlx5_common.h>
17 #include <mlx5_common_mp.h>
18 #include <mlx5_common_mr.h>
19 #include <mlx5_malloc.h>
21 #include "mlx5_defs.h"
23 #include "mlx5_common_os.h"
24 #include "mlx5_utils.h"
25 #include "mlx5_rxtx.h"
28 #include "mlx5_autoconf.h"
30 #include "mlx5_flow.h"
31 #include "mlx5_devx.h"
33 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
35 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
37 /* Spinlock for mlx5_shared_data allocation. */
38 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
41 * Initialize shared data between primary and secondary process.
43  * A memzone is reserved by the primary process and secondary processes attach to it.
47 * 0 on success, a negative errno value otherwise and rte_errno is set.
50 mlx5_init_shared_data(void)
52 const struct rte_memzone *mz;
55 rte_spinlock_lock(&mlx5_shared_data_lock);
56 if (mlx5_shared_data == NULL) {
57 /* Allocate shared memory. */
58 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
59 sizeof(*mlx5_shared_data),
63 "Cannot allocate mlx5 shared data");
67 mlx5_shared_data = mz->addr;
68 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
69 rte_spinlock_init(&mlx5_shared_data->lock);
72 rte_spinlock_unlock(&mlx5_shared_data_lock);
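/*
 * For reference only: a secondary process would attach to the shared data by
 * looking the memzone up by name instead of reserving it. A minimal sketch
 * (hypothetical helper, not part of this file), assuming the primary process
 * has already reserved MZ_MLX5_PMD_SHARED_DATA:
 *
 *	static int
 *	example_attach_shared_data(void)
 *	{
 *		const struct rte_memzone *mz;
 *
 *		mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
 *		if (mz == NULL) {
 *			rte_errno = ENOENT;
 *			return -rte_errno;
 *		}
 *		mlx5_shared_data = mz->addr;
 *		return 0;
 *	}
 */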
77 * PMD global initialization.
79  * Independent of any individual device, this function initializes global
80  * per-PMD data structures, distinguishing between primary and secondary processes.
81  * Hence, each initialization is called once per process.
84 * 0 on success, a negative errno value otherwise and rte_errno is set.
89 if (mlx5_init_shared_data())
95 * Get mlx5 device attributes.
98 * Pointer to device context.
101 * Pointer to mlx5 device attributes.
104  *   0 on success, non-zero error number otherwise.
107 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
109 struct mlx5_context *mlx5_ctx;
110 struct mlx5_hca_attr hca_attr;
111 void *pv_iseg = NULL;
117 mlx5_ctx = (struct mlx5_context *)ctx;
118 memset(device_attr, 0, sizeof(*device_attr));
119 err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
121 DRV_LOG(ERR, "Failed to get device hca_cap");
124 device_attr->max_cq = 1 << hca_attr.log_max_cq;
125 device_attr->max_qp = 1 << hca_attr.log_max_qp;
126 device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
127 device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
128 device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
129 device_attr->max_pd = 1 << hca_attr.log_max_pd;
130 device_attr->max_srq = 1 << hca_attr.log_max_srq;
131 device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
132 if (hca_attr.rss_ind_tbl_cap) {
133 device_attr->max_rwq_indirection_table_size =
134 1 << hca_attr.rss_ind_tbl_cap;
136 pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
137 if (pv_iseg == NULL) {
138 DRV_LOG(ERR, "Failed to get device hca_iseg");
142 snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
143 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
144 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
145 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
151 * Initialize DR related data within private structure.
152 * Routine checks the reference counter and does actual
153 * resources creation/initialization only if counter is zero.
156 * Pointer to the private device data structure.
159 * Zero on success, positive error code otherwise.
162 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
164 struct mlx5_dev_ctx_shared *sh = priv->sh;
168 err = mlx5_alloc_table_hash_list(priv);
170 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
171 (void *)sh->flow_tbls);
175 * Destroy DR related data within private structure.
178 * Pointer to the private device data structure.
181 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
183 mlx5_free_table_hash_list(priv);
187 * Set the completion channel file descriptor interrupt as non-blocking.
188 * Currently it has no support under Windows.
191 * Pointer to RQ channel object, which includes the channel fd
194  *   The file descriptor (representing the interrupt) used in this channel.
197 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
200 mlx5_os_set_nonblock_channel_fd(int fd)
203 DRV_LOG(WARNING, "%s: is not supported", __func__);
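/*
 * For comparison only: the Linux counterpart of this routine puts the fd into
 * non-blocking mode roughly as sketched below (POSIX fcntl(), illustrative
 * and not applicable on Windows):
 *
 *	int flags = fcntl(fd, F_GETFL);
 *
 *	if (flags == -1 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
 *		rte_errno = errno;
 *		return -rte_errno;
 *	}
 *	return 0;
 */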
208  * Function API to open a device under Windows.
210 * This function calls the Windows glue APIs to open a device.
213 * Pointer to the device attributes (name, port, etc).
215 * Pointer to device configuration structure.
217 * Pointer to shared context structure.
220 * 0 on success, a positive error value otherwise.
223 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
224 const struct mlx5_dev_config *config,
225 struct mlx5_dev_ctx_shared *sh)
227 RTE_SET_USED(config);
229 struct mlx5_context *mlx5_ctx;
231 pthread_mutex_init(&sh->txpp.mutex, NULL);
232 /* Set numa node from pci probe */
233 sh->numa_node = spawn->pci_dev->device.numa_node;
235 /* Try to open device with DevX */
237 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
239 DRV_LOG(ERR, "open_device failed");
244 mlx5_ctx = (struct mlx5_context *)sh->ctx;
245 err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev);
247 DRV_LOG(ERR, "Failed to query device context fields.");
252 * DV flow counter mode detect and config.
255 * Pointer to rte_eth_dev structure.
259 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
261 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
262 struct mlx5_priv *priv = dev->data->dev_private;
263 struct mlx5_dev_ctx_shared *sh = priv->sh;
266 #ifndef HAVE_IBV_DEVX_ASYNC
270 if (!priv->config.devx || !priv->config.dv_flow_en ||
271 !priv->config.hca_attr.flow_counters_dump ||
272 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
273 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
277 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
278 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
279 priv->config.hca_attr.flow_counters_dump,
280 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
281 	/* Initialize fallback mode only on the port that initializes sh. */
283 sh->cmng.counter_fallback = fallback;
284 else if (fallback != sh->cmng.counter_fallback)
285 		DRV_LOG(WARNING, "Port %d in sh has a different fallback mode "
286 			"than other ports: %d.", PORT_ID(priv), fallback);
291  * Spawn an Ethernet device from DevX information.
294 * Backing DPDK device.
296  *   DevX device parameters (name, port, switch_info) to spawn.
298 * Device configuration parameters.
301 * A valid Ethernet device object on success, NULL otherwise and rte_errno
302 * is set. The following errors are defined:
304 * EEXIST: device is already spawned
306 static struct rte_eth_dev *
307 mlx5_dev_spawn(struct rte_device *dpdk_dev,
308 struct mlx5_dev_spawn_data *spawn,
309 struct mlx5_dev_config *config)
311 const struct mlx5_switch_info *switch_info = &spawn->info;
312 struct mlx5_dev_ctx_shared *sh = NULL;
313 struct mlx5_dev_attr device_attr;
314 struct rte_eth_dev *eth_dev = NULL;
315 struct mlx5_priv *priv = NULL;
317 unsigned int cqe_comp;
318 struct rte_ether_addr mac;
319 char name[RTE_ETH_NAME_MAX_LEN];
320 int own_domain_id = 0;
323 /* Build device name. */
324 strlcpy(name, dpdk_dev->name, sizeof(name));
325 /* check if the device is already spawned */
326 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
330 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
332 	 * Some parameters are needed in advance to create the device context. We
333 	 * process the devargs here to get those parameters, and later process the
334 	 * devargs again to override some hardware settings.
336 err = mlx5_args(config, dpdk_dev->devargs);
339 DRV_LOG(ERR, "failed to process device arguments: %s",
340 strerror(rte_errno));
343 mlx5_malloc_mem_select(config->sys_mem_en);
344 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
347 config->devx = sh->devx;
348 /* Initialize the shutdown event in mlx5_dev_spawn to
349 * support mlx5_is_removed for Windows.
351 err = mlx5_glue->devx_init_showdown_event(sh->ctx);
353 		DRV_LOG(ERR, "failed to init shutdown event: %s",
357 DRV_LOG(DEBUG, "MPW isn't supported");
358 mlx5_os_get_dev_attr(sh->ctx, &device_attr);
360 config->ind_table_max_size =
361 sh->device_attr.max_rwq_indirection_table_size;
362 if (RTE_CACHE_LINE_SIZE == 128 &&
363 !(device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
367 config->cqe_comp = cqe_comp;
368 DRV_LOG(DEBUG, "tunnel offloading is not supported");
369 config->tunnel_en = 0;
370 	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported");
372 /* Allocate private eth device data. */
373 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
375 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
377 DRV_LOG(ERR, "priv allocation failure");
382 priv->dev_port = spawn->phys_port;
383 priv->pci_dev = spawn->pci_dev;
384 priv->mtu = RTE_ETHER_MTU;
385 priv->mp_id.port_id = port_id;
386 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
387 priv->representor = !!switch_info->representor;
388 priv->master = !!switch_info->master;
389 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
390 priv->vport_meta_tag = 0;
391 priv->vport_meta_mask = 0;
392 priv->pf_bond = spawn->pf_bond;
394 /* representor_id field keeps the unmodified VF index. */
395 priv->representor_id = -1;
397 * Look for sibling devices in order to reuse their switch domain
398 * if any, otherwise allocate one.
400 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
401 const struct mlx5_priv *opriv =
402 rte_eth_devices[port_id].data->dev_private;
405 opriv->sh != priv->sh ||
407 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
409 priv->domain_id = opriv->domain_id;
412 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
413 err = rte_eth_switch_domain_alloc(&priv->domain_id);
416 DRV_LOG(ERR, "unable to allocate switch domain: %s",
417 strerror(rte_errno));
422 /* Override some values set by hardware configuration. */
423 mlx5_args(config, dpdk_dev->devargs);
424 err = mlx5_dev_check_sibling_config(priv, config);
427 config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
428 IBV_DEVICE_RAW_IP_CSUM);
429 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
430 (config->hw_csum ? "" : "not "));
431 DRV_LOG(DEBUG, "counters are not supported");
432 config->ind_table_max_size =
433 sh->device_attr.max_rwq_indirection_table_size;
435 * Remove this check once DPDK supports larger/variable
436 * indirection tables.
438 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
439 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
440 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
441 config->ind_table_max_size);
442 config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
443 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
444 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
445 (config->hw_vlan_strip ? "" : "not "));
446 config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
447 IBV_RAW_PACKET_CAP_SCATTER_FCS);
448 if (config->hw_padding) {
449 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
450 config->hw_padding = 0;
452 config->tso = (sh->device_attr.max_tso > 0 &&
453 (sh->device_attr.tso_supported_qpts &
454 (1 << IBV_QPT_RAW_PACKET)));
456 config->tso_max_payload_sz = sh->device_attr.max_tso;
457 DRV_LOG(DEBUG, "%sMPS is %s.",
458 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
459 config->mps == MLX5_MPW ? "legacy " : "",
460 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
461 if (config->cqe_comp && !cqe_comp) {
462 DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
463 config->cqe_comp = 0;
466 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
471 	/* Check relaxed ordering support. */
472 sh->cmng.relaxed_ordering_read = 0;
473 sh->cmng.relaxed_ordering_write = 0;
474 if (!haswell_broadwell_cpu) {
475 sh->cmng.relaxed_ordering_write =
476 config->hca_attr.relaxed_ordering_write;
477 sh->cmng.relaxed_ordering_read =
478 config->hca_attr.relaxed_ordering_read;
482 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
484 err = config->hca_attr.access_register_user ?
485 mlx5_devx_cmd_register_read
486 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
487 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
491 /* MTUTC register is read successfully. */
492 ts_mode = MLX5_GET(register_mtutc, reg,
494 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
495 config->rt_timestamp = 1;
497 /* Kernel does not support register reading. */
498 if (config->hca_attr.dev_freq_khz ==
499 (NS_PER_S / MS_PER_S))
500 config->rt_timestamp = 1;
502 sh->rq_ts_format = config->hca_attr.rq_ts_format;
503 sh->sq_ts_format = config->hca_attr.sq_ts_format;
504 sh->qp_ts_format = config->hca_attr.qp_ts_format;
506 if (config->mprq.enabled) {
507 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
508 config->mprq.enabled = 0;
510 if (config->max_dump_files_num == 0)
511 config->max_dump_files_num = 128;
512 eth_dev = rte_eth_dev_allocate(name);
513 if (eth_dev == NULL) {
514 DRV_LOG(ERR, "can not allocate rte ethdev");
518 if (priv->representor) {
519 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
520 eth_dev->data->representor_id = priv->representor_id;
523 	 * Store the associated network device interface index. This index
524 	 * is permanent throughout the lifetime of the device. So, we may store
525 	 * the ifindex here and use the cached value later.
527 MLX5_ASSERT(spawn->ifindex);
528 priv->if_index = spawn->ifindex;
529 eth_dev->data->dev_private = priv;
530 priv->dev_data = eth_dev->data;
531 eth_dev->data->mac_addrs = priv->mac;
532 eth_dev->device = dpdk_dev;
533 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
534 /* Configure the first MAC address by default. */
535 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
537 "port %u cannot get MAC address, is mlx5_en"
538 " loaded? (errno: %s).",
539 eth_dev->data->port_id, strerror(rte_errno));
544 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
545 eth_dev->data->port_id,
546 mac.addr_bytes[0], mac.addr_bytes[1],
547 mac.addr_bytes[2], mac.addr_bytes[3],
548 mac.addr_bytes[4], mac.addr_bytes[5]);
549 #ifdef RTE_LIBRTE_MLX5_DEBUG
551 char ifname[MLX5_NAMESIZE];
553 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
554 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
555 eth_dev->data->port_id, ifname);
557 DRV_LOG(DEBUG, "port %u ifname is unknown.",
558 eth_dev->data->port_id);
561 /* Get actual MTU if possible. */
562 err = mlx5_get_mtu(eth_dev, &priv->mtu);
567 DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
569 /* Initialize burst functions to prevent crashes before link-up. */
570 eth_dev->rx_pkt_burst = removed_rx_burst;
571 eth_dev->tx_pkt_burst = removed_tx_burst;
572 eth_dev->dev_ops = &mlx5_dev_ops;
573 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
574 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
575 eth_dev->rx_queue_count = mlx5_rx_queue_count;
576 /* Register MAC address. */
577 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
579 priv->ctrl_flows = 0;
580 TAILQ_INIT(&priv->flow_meters);
581 TAILQ_INIT(&priv->flow_meter_profiles);
582 /* Bring Ethernet device up. */
583 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
584 eth_dev->data->port_id);
585 	/* Netlink calls are unsupported - set to -1 so release does not fail. */
586 priv->nl_socket_rdma = -1;
587 priv->nl_socket_route = -1;
588 mlx5_set_link_up(eth_dev);
590 	 * Even though the interrupt handler is not installed yet,
591 	 * interrupts will still trigger on the async fd from the
592 	 * device context returned by open_device().
594 mlx5_link_update(eth_dev, 0);
595 config->dv_esw_en = 0;
596 /* Detect minimal data bytes to inline. */
597 mlx5_set_min_inline(spawn, config);
598 /* Store device configuration on private structure. */
599 priv->config = *config;
600 /* Create context for virtual machine VLAN workaround. */
601 priv->vmwa_context = NULL;
602 if (config->dv_flow_en) {
603 err = mlx5_alloc_shared_dr(priv);
607 	/* Flow priority number detection is not supported. */
608 priv->config.flow_prio = -1;
609 if (!priv->config.dv_esw_en &&
610 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
611 DRV_LOG(WARNING, "metadata mode %u is not supported "
612 "(no E-Switch)", priv->config.dv_xmeta_en);
613 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
615 mlx5_set_metadata_mask(eth_dev);
616 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
617 !priv->sh->dv_regc0_mask) {
618 DRV_LOG(ERR, "metadata mode %u is not supported "
619 "(no metadata reg_c[0] is available).",
620 priv->config.dv_xmeta_en);
624 mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
627 mlx5_hrxq_remove_cb);
628 /* Query availability of metadata reg_c's. */
629 err = mlx5_flow_discover_mreg_c(eth_dev);
634 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
636 "port %u extensive metadata register is not supported.",
637 eth_dev->data->port_id);
638 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
639 DRV_LOG(ERR, "metadata mode %u is not supported "
640 "(no metadata registers available).",
641 priv->config.dv_xmeta_en);
646 if (config->devx && config->dv_flow_en) {
647 priv->obj_ops = devx_obj_ops;
649 DRV_LOG(ERR, "Flow mode %u is not supported "
650 "(Windows flow must be DevX with DV flow enabled).",
651 priv->config.dv_flow_en);
655 mlx5_flow_counter_mode_config(eth_dev);
660 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
663 eth_dev->data->dev_private = NULL;
665 if (eth_dev != NULL) {
666 		/* mac_addrs must not be freed alone because it is part of dev_private. */
669 eth_dev->data->mac_addrs = NULL;
670 rte_eth_dev_release_port(eth_dev);
673 mlx5_free_shared_dev_ctx(sh);
674 MLX5_ASSERT(err > 0);
680  * This function should share events between multiple ports of a single IB
681  * device. Currently it has no support under Windows.
684 * Pointer to mlx5_dev_ctx_shared object.
687 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
690 DRV_LOG(WARNING, "%s: is not supported", __func__);
694  * This function should share events between multiple ports of a single IB
695  * device. Currently it has no support under Windows.
698 * Pointer to mlx5_dev_ctx_shared object.
701 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
704 DRV_LOG(WARNING, "%s: is not supported", __func__);
708 * Read statistics by a named counter.
711 * Pointer to the private device data structure.
712 * @param[in] ctr_name
713 * Pointer to the name of the statistic counter to read
715 * Pointer to read statistic value.
717  *   0 on success and stat is valid, 1 if failed to read the value
722 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
726 RTE_SET_USED(ctr_name);
728 DRV_LOG(WARNING, "%s: is not supported", __func__);
733 * Flush device MAC addresses
734 * Currently it has no support under Windows.
737 * Pointer to Ethernet device structure.
741 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
744 DRV_LOG(WARNING, "%s: is not supported", __func__);
748 * Remove a MAC address from device
749 * Currently it has no support under Windows.
752 * Pointer to Ethernet device structure.
757 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
761 DRV_LOG(WARNING, "%s: is not supported", __func__);
765 * Adds a MAC address to the device
766 * Currently it has no support under Windows.
769 * Pointer to Ethernet device structure.
771 * MAC address to register.
776 * 0 on success, a negative errno value otherwise
779 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
783 struct rte_ether_addr lmac;
785 if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
787 "port %u cannot get MAC address, is mlx5_en"
788 " loaded? (errno: %s)",
789 dev->data->port_id, strerror(rte_errno));
792 if (!rte_is_same_ether_addr(&lmac, mac)) {
794 			"adding a new MAC address to the device is unsupported");
801 * Modify a VF MAC address
802 * Currently it has no support under Windows.
805 * Pointer to device private data.
807 * MAC address to modify into.
809 * Net device interface index
814 * 0 on success, a negative errno value otherwise
817 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
818 unsigned int iface_idx,
819 struct rte_ether_addr *mac_addr,
826 DRV_LOG(WARNING, "%s: is not supported", __func__);
831 * Set device promiscuous mode
832 * Currently it has no support under Windows.
835 * Pointer to Ethernet device structure.
837 * 0 - promiscuous is disabled, otherwise - enabled
840 * 0 on success, a negative error value otherwise
843 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
847 DRV_LOG(WARNING, "%s: is not supported", __func__);
852 * Set device allmulti mode
855 * Pointer to Ethernet device structure.
857  *   0 - all multicast is disabled, otherwise - enabled
860 * 0 on success, a negative error value otherwise
863 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
867 DRV_LOG(WARNING, "%s: is not supported", __func__);
872 * Detect if a devx_device_bdf object has identical DBDF values to the
873 * rte_pci_addr found in bus/pci probing
875 * @param[in] devx_bdf
876 * Pointer to the devx_device_bdf structure.
878 * Pointer to the rte_pci_addr structure.
881 * 1 on Device match, 0 on mismatch.
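 *
 * Worked example (values illustrative, derived from the checks below): for
 * PCI address 0000:08:00.1 the expected encoding is
 *   devx_bdf->bus_id >> 8   == addr->domain   (0x0000)
 *   devx_bdf->bus_id & 0xff == addr->bus      (0x08)
 *   devx_bdf->dev_id        == addr->devid    (0x00)
 *   devx_bdf->fnc_id        == addr->function (0x1)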
884 mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf,
885 struct rte_pci_addr *addr)
887 if (addr->domain != (devx_bdf->bus_id >> 8) ||
888 addr->bus != (devx_bdf->bus_id & 0xff) ||
889 addr->devid != devx_bdf->dev_id ||
890 addr->function != devx_bdf->fnc_id) {
897  * Detect if a devx_device_bdf object matches the rte_pci_addr
898  * found in bus/pci probing.
899  * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF.
901 * @param[in] devx_bdf
902 * Pointer to the devx_device_bdf structure.
904 * Pointer to the rte_pci_addr structure.
907 * 1 on Device match, 0 on mismatch, rte_errno code on failure.
910 mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
911 struct rte_pci_addr *addr)
914 struct devx_device mlx5_dev;
916 if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr))
919 	 * Didn't match on the Native/PF BDF, could still
920 	 * match a VF BDF, so check it next.
922 err = mlx5_glue->query_device(devx_bdf, &mlx5_dev);
924 DRV_LOG(ERR, "query_device failed");
928 if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr))
934 * DPDK callback to register a PCI device.
936 * This function spawns Ethernet devices out of a given PCI device.
939 * PCI driver structure (mlx5_driver).
941 * PCI device information.
944 * 0 on success, a negative errno value otherwise and rte_errno is set.
947 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
948 struct rte_pci_device *pci_dev)
950 struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs;
952 	 * Number of found IB devices matching the requested PCI BDF.
953 	 * nd != 1 means there are multiple IB devices over the same
954 	 * PCI device and we have representors and a master.
958 	 * Number of found IB device ports. nd = 1 and np = 1..n means
959 	 * we have a single multiport IB device, and there may be
960 	 * representors attached to some of the found ports.
961 	 * Currently not supported.
962 * unsigned int np = 0;
966 	 * Number of DPDK Ethernet devices to spawn - either over
967 	 * multiple IB devices or over multiple ports of a single IB device.
968 	 * Actually this is the number of spawn iterations to perform.
973 * < 0 - no bonding device (single one)
974 * >= 0 - bonding device (value is slave PF index)
977 struct mlx5_dev_spawn_data *list = NULL;
978 struct mlx5_dev_config dev_config;
979 unsigned int dev_config_vf;
983 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
984 DRV_LOG(ERR, "Secondary process is not supported on Windows.");
987 ret = mlx5_init_once();
989 DRV_LOG(ERR, "unable to init PMD global data: %s",
990 strerror(rte_errno));
994 devx_bdf_devs = mlx5_glue->get_device_list(&ret);
995 orig_devx_bdf_devs = devx_bdf_devs;
996 if (!devx_bdf_devs) {
997 rte_errno = errno ? errno : ENOSYS;
998 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1002 	 * First scan the list of all InfiniBand devices to find
1003 	 * matching ones, gathering them into the list.
1005 struct devx_device_bdf *devx_bdf_match[ret + 1];
1008 err = mlx5_match_devx_devices_to_addr(devx_bdf_devs,
1018 devx_bdf_match[nd++] = devx_bdf_devs;
1020 devx_bdf_match[nd] = NULL;
1022 /* No device matches, just complain and bail out. */
1024 "no DevX device matches PCI device " PCI_PRI_FMT ","
1025 " is DevX Configured?",
1026 pci_dev->addr.domain, pci_dev->addr.bus,
1027 pci_dev->addr.devid, pci_dev->addr.function);
1033 	 * Now we can determine the maximal
1034 	 * number of devices to be spawned.
1036 list = mlx5_malloc(MLX5_MEM_ZERO,
1037 sizeof(struct mlx5_dev_spawn_data),
1038 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1040 DRV_LOG(ERR, "spawn data array allocation failure");
1045 memset(&list[ns].info, 0, sizeof(list[ns].info));
1046 list[ns].max_port = 1;
1047 list[ns].phys_port = 1;
1048 list[ns].phys_dev = devx_bdf_match[ns];
1049 list[ns].eth_dev = NULL;
1050 list[ns].pci_dev = pci_dev;
1051 list[ns].pf_bond = bd;
1052 list[ns].ifindex = -1; /* Spawn will assign */
1054 (struct mlx5_switch_info){
1057 .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
1061 /* Device specific configuration. */
1062 switch (pci_dev->id.device_id) {
1063 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1064 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1065 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1066 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1067 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
1068 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
1069 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
1076 /* Default configuration. */
1077 memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
1078 dev_config.vf = dev_config_vf;
1080 dev_config.dbnc = MLX5_ARG_UNSET;
1081 dev_config.rx_vec_en = 1;
1082 dev_config.txq_inline_max = MLX5_ARG_UNSET;
1083 dev_config.txq_inline_min = MLX5_ARG_UNSET;
1084 dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
1085 dev_config.txqs_inline = MLX5_ARG_UNSET;
1086 dev_config.vf_nl_en = 0;
1087 dev_config.mr_ext_memseg_en = 1;
1088 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
1089 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
1090 dev_config.dv_esw_en = 0;
1091 dev_config.dv_flow_en = 1;
1092 dev_config.decap_en = 0;
1093 dev_config.log_hp_size = MLX5_ARG_UNSET;
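	/*
	 * Any of these defaults may be overridden per device through devargs,
	 * which mlx5_dev_spawn() parses via mlx5_args(). Illustrative
	 * invocation (PCI address and key/value are examples only):
	 *
	 *	dpdk-testpmd -a 0000:08:00.0,rx_vec_en=0 -- -i
	 */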
1094 list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device,
1097 if (!list[ns].eth_dev)
1099 restore = list[ns].eth_dev->data->dev_flags;
1100 rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev);
1101 /* Restore non-PCI flags cleared by the above call. */
1102 list[ns].eth_dev->data->dev_flags |= restore;
1103 rte_eth_dev_probing_finish(list[ns].eth_dev);
1107 * Do the routine cleanup:
1108 * - free allocated spawn data array
1109 * - free the device list
1113 MLX5_ASSERT(orig_devx_bdf_devs);
1114 mlx5_glue->free_device_list(orig_devx_bdf_devs);
1119  * Set the reg_mr and dereg_mr callbacks.
1121  * @param[out] reg_mr_cb
1122  *   Pointer to the reg_mr function.
1123  * @param[out] dereg_mr_cb
1124  *   Pointer to the dereg_mr function.
1128 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
1129 mlx5_dereg_mr_t *dereg_mr_cb)
1131 *reg_mr_cb = mlx5_os_reg_mr;
1132 *dereg_mr_cb = mlx5_os_dereg_mr;
1136  * Extract the pdn of a PD object using DevX.
1139 * Pointer to the DevX PD object.
1141 * Pointer to the PD object number variable.
1144 * 0 on success, error value otherwise.
1147 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
1152 *pdn = ((struct mlx5_pd *)pd)->pdn;
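/*
 * Typical usage (minimal sketch, assuming a DevX PD handle "pd" obtained
 * elsewhere): DevX object creation takes the PD number rather than the PD
 * handle, so callers extract it first:
 *
 *	uint32_t pdn = 0;
 *
 *	if (mlx5_os_get_pdn(pd, &pdn) != 0)
 *		return -1; /* failed to query the PD number */
 *	... place pdn into the DevX object creation attributes ...
 */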
1156 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};