/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#include <rte_windows.h>
#include <ethdev_pci.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"
#define MLX5_TAGS_HLIST_ARRAY_SIZE 8192

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		/* Allocate shared memory. */
		mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
					 sizeof(*mlx5_shared_data),
					 SOCKET_ID_ANY, 0);
		if (mz == NULL) {
			DRV_LOG(ERR,
				"Cannot allocate mlx5 shared data");
			ret = -rte_errno;
			goto error;
		}
		mlx5_shared_data = mz->addr;
		memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
		rte_spinlock_init(&mlx5_shared_data->lock);
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures, distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	if (mlx5_init_shared_data())
		return -rte_errno;
	return 0;
}

/**
 * Get mlx5 device attributes.
 *
 * @param ctx
 *   Pointer to device context.
 *
 * @param device_attr
 *   Pointer to mlx5 device attributes.
 *
 * @return
 *   0 on success, non zero error number otherwise.
 */
int
mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
{
	struct mlx5_context *mlx5_ctx;
	struct mlx5_hca_attr hca_attr;
	void *pv_iseg = NULL;
	u32 cb_iseg = 0;
	int err = 0;

	mlx5_ctx = (struct mlx5_context *)ctx;
	memset(device_attr, 0, sizeof(*device_attr));
	err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
	if (err) {
		DRV_LOG(ERR, "Failed to get device hca_cap");
		return err;
	}
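	/*
	 * The HCA reports these capabilities as log2 values; convert them
	 * to absolute limits for the generic device attributes below.
	 */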
	device_attr->max_cq = 1 << hca_attr.log_max_cq;
	device_attr->max_qp = 1 << hca_attr.log_max_qp;
	device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
	device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
	device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
	device_attr->max_pd = 1 << hca_attr.log_max_pd;
	device_attr->max_srq = 1 << hca_attr.log_max_srq;
	device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
	if (hca_attr.rss_ind_tbl_cap) {
		device_attr->max_rwq_indirection_table_size =
			1 << hca_attr.rss_ind_tbl_cap;
	}
	pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
	if (pv_iseg == NULL) {
		DRV_LOG(ERR, "Failed to get device hca_iseg");
		rte_errno = errno;
		return -rte_errno;
	}
	snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
		 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
	return err;
}

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int err = 0;

	if (!sh->flow_tbls)
		err = mlx5_alloc_table_hash_list(priv);
	else
		DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
			(void *)sh->flow_tbls);
	return err;
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_os_free_shared_dr(struct mlx5_priv *priv)
{
	mlx5_free_table_hash_list(priv);
}

/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 * Currently it has no support under Windows.
 *
 * @param[in] rxq_obj
 *   Pointer to RQ channel object, which includes the channel fd
 *
 * @param[in] fd
 *   The file descriptor (representing the interrupt) used in this channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	(void)fd;
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Function API to open device under Windows.
 *
 * This function calls the Windows glue APIs to open a device.
 *
 * @param[in] spawn
 *   Pointer to the device attributes (name, port, etc).
 * @param[out] config
 *   Pointer to device configuration structure.
 * @param[out] sh
 *   Pointer to shared context structure.
 *
 * @return
 *   0 on success, a positive error value otherwise.
 */
int
mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
		 const struct mlx5_dev_config *config,
		 struct mlx5_dev_ctx_shared *sh)
{
	RTE_SET_USED(config);
	int err = 0;
	struct mlx5_context *mlx5_ctx;

	pthread_mutex_init(&sh->txpp.mutex, NULL);
	/* Set numa node from pci probe */
	sh->numa_node = spawn->pci_dev->device.numa_node;

	/* Try to open device with DevX */
	sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
	if (!sh->ctx) {
		DRV_LOG(ERR, "open_device failed");
		err = errno;
		return err;
	}
	sh->devx = 1;
	mlx5_ctx = (struct mlx5_context *)sh->ctx;
	err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev);
	if (err)
		DRV_LOG(ERR, "Failed to query device context fields.");
	return err;
}

/**
 * DV flow counter mode detect and config.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 */
static void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
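	/*
	 * DevX counter management can be used only when DevX and DV flow are
	 * enabled and the device supports flow counter dump and the required
	 * bulk allocation granularity; otherwise fall back to the
	 * software-managed counters.
	 */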
	if (!priv->config.devx || !priv->config.dv_flow_en ||
	    !priv->config.hca_attr.flow_counters_dump ||
	    !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			priv->config.hca_attr.flow_counters_dump,
			priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->cmng.counter_fallback = fallback;
	else if (fallback != sh->cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"with others:%d.", PORT_ID(priv), fallback);
#endif
}

/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EEXIST: device is already spawned
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config *config)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct mlx5_dev_attr device_attr;
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int cqe_comp;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;

	/* Build device name. */
	strlcpy(name, dpdk_dev->name, sizeof(name));
	/* check if the device is already spawned */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	/*
	 * Some parameters are needed in advance to create device context. We
	 * process the devargs here to get ones, and later process devargs
	 * again to override some hardware settings.
	 */
	err = mlx5_args(config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	mlx5_malloc_mem_select(config->sys_mem_en);
	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
	if (!sh)
		return NULL;
	config->devx = sh->devx;
	/* Initialize the shutdown event in mlx5_dev_spawn to
	 * support mlx5_is_removed for Windows.
	 */
	err = mlx5_glue->devx_init_showdown_event(sh->ctx);
	if (err) {
		DRV_LOG(ERR, "failed to init shutdown event: %s",
			strerror(errno));
		goto error;
	}
	DRV_LOG(DEBUG, "MPW isn't supported");
	mlx5_os_get_dev_attr(sh->ctx, &device_attr);
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
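	/*
	 * Rx CQE compression can stay enabled only when either the cache
	 * line is 64B or the device supports 128B compressed CQEs.
	 */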
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	config->cqe_comp = cqe_comp;
	DRV_LOG(DEBUG, "tunnel offloading is not supported");
	config->tunnel_en = 0;
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported");
	config->mpls_en = 0;
	/* Allocate private eth device data. */
	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->dev_port = spawn->phys_port;
	priv->pci_dev = spawn->pci_dev;
	priv->mtu = RTE_ETHER_MTU;
	priv->mp_id.port_id = port_id;
	strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->vport_meta_tag = 0;
	priv->vport_meta_mask = 0;
	priv->pf_bond = spawn->pf_bond;
	/* representor_id field keeps the unmodified VF index. */
	priv->representor_id = -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->sh != priv->sh ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	/* Override some values set by hardware configuration. */
	mlx5_args(config, dpdk_dev->devargs);
	err = mlx5_dev_check_sibling_config(priv, config);
	if (err)
		goto error;
	config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
			    IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config->hw_csum ? "" : "not "));
	DRV_LOG(DEBUG, "counters are not supported");
	config->ind_table_max_size =
		sh->device_attr.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config->ind_table_max_size);
	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config->hw_vlan_strip ? "" : "not "));
	config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
	if (config->hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
		config->hw_padding = 0;
	}
	config->tso = (sh->device_attr.max_tso > 0 &&
		      (sh->device_attr.tso_supported_qpts &
		      (1 << IBV_QPT_RAW_PACKET)));
	if (config->tso)
		config->tso_max_payload_sz = sh->device_attr.max_tso;
	DRV_LOG(DEBUG, "%sMPS is %s.",
		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
		config->mps == MLX5_MPW ? "legacy " : "",
		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config->cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
		config->cqe_comp = 0;
	}
	err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
	if (err) {
		err = -err;
		goto error;
	}
	/* Check relaxed ordering support. */
	sh->cmng.relaxed_ordering_read = 0;
	sh->cmng.relaxed_ordering_write = 0;
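	/*
	 * Relaxed ordering for the counter pools is taken from the HCA
	 * capabilities only when not running on a Haswell/Broadwell CPU,
	 * where the PMD keeps it disabled.
	 */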
	if (!haswell_broadwell_cpu) {
		sh->cmng.relaxed_ordering_write =
			config->hca_attr.relaxed_ordering_write;
		sh->cmng.relaxed_ordering_read =
			config->hca_attr.relaxed_ordering_read;
	}
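	/*
	 * Detect whether the device clock runs in real-time mode: prefer
	 * reading the MTUTC register when user register access is allowed,
	 * otherwise infer it from the reported device frequency.
	 */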
	if (config->devx) {
		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];

		err = config->hca_attr.access_register_user ?
			mlx5_devx_cmd_register_read
				(sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
				reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
		if (!err) {
			uint32_t ts_mode;

			/* MTUTC register is read successfully. */
			ts_mode = MLX5_GET(register_mtutc, reg,
					   time_stamp_mode);
			if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
				config->rt_timestamp = 1;
		} else {
			/* Kernel does not support register reading. */
			if (config->hca_attr.dev_freq_khz ==
			    (NS_PER_S / MS_PER_S))
				config->rt_timestamp = 1;
		}
		sh->rq_ts_format = config->hca_attr.rq_ts_format;
		sh->sq_ts_format = config->hca_attr.sq_ts_format;
		sh->qp_ts_format = config->hca_attr.qp_ts_format;
	}
	if (config->mprq.enabled) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config->mprq.enabled = 0;
	}
	if (config->max_dump_files_num == 0)
		config->max_dump_files_num = 128;
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
	}
	/*
	 * Store associated network device interface index. This index
	 * is permanent throughout the lifetime of device. So, we may store
	 * the ifindex here and use the cached value further.
	 */
	MLX5_ASSERT(spawn->ifindex);
	priv->if_index = spawn->ifindex;
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s).",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id,
		mac.addr_bytes[0], mac.addr_bytes[1],
		mac.addr_bytes[2], mac.addr_bytes[3],
		mac.addr_bytes[4], mac.addr_bytes[5]);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	{
		char ifname[MLX5_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown.",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
	eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
	eth_dev->rx_queue_count = mlx5_rx_queue_count;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	priv->ctrl_flows = 0;
	TAILQ_INIT(&priv->flow_meters);
	TAILQ_INIT(&priv->flow_meter_profiles);
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
		eth_dev->data->port_id);
	/* nl calls are unsupported - set to -1 not to fail on release */
	priv->nl_socket_rdma = -1;
	priv->nl_socket_route = -1;
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
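	/* E-Switch (switchdev) offloads are not available under Windows. */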
	config->dv_esw_en = 0;
	/* Detect minimal data bytes to inline. */
	mlx5_set_min_inline(spawn, config);
	/* Store device configuration on private structure. */
	priv->config = *config;
	/* Create context for virtual machine VLAN workaround. */
	priv->vmwa_context = NULL;
	if (config->dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	/* No supported flow priority number detection. */
	priv->config.flow_prio = -1;
	if (!priv->config.dv_esw_en &&
	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		DRV_LOG(WARNING, "metadata mode %u is not supported "
			"(no E-Switch)", priv->config.dv_xmeta_en);
		priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
	}
	mlx5_set_metadata_mask(eth_dev);
	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
	    !priv->sh->dv_regc0_mask) {
		DRV_LOG(ERR, "metadata mode %u is not supported "
			"(no metadata reg_c[0] is available).",
			priv->config.dv_xmeta_en);
		err = ENOTSUP;
		goto error;
	}
	mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
			     mlx5_hrxq_create_cb,
			     mlx5_hrxq_match_cb,
			     mlx5_hrxq_remove_cb);
	/* Query availability of metadata reg_c's. */
	err = mlx5_flow_discover_mreg_c(eth_dev);
	if (err < 0) {
		err = -err;
		goto error;
	}
	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
		DRV_LOG(DEBUG,
			"port %u extensive metadata register is not supported.",
			eth_dev->data->port_id);
		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
			DRV_LOG(ERR, "metadata mode %u is not supported "
				"(no metadata registers available).",
				priv->config.dv_xmeta_en);
			err = ENOTSUP;
			goto error;
		}
	}
	if (config->devx && config->dv_flow_en) {
		priv->obj_ops = devx_obj_ops;
	} else {
		DRV_LOG(ERR, "Flow mode %u is not supported "
			"(Windows flow must be DevX with DV flow enabled).",
			priv->config.dv_flow_en);
		err = ENOTSUP;
		goto error;
	}
	mlx5_flow_counter_mode_config(eth_dev);
	return eth_dev;
error:
	if (priv) {
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		mlx5_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because part of
		 * dev_private
		 */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_dev_ctx(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
	RTE_SET_USED(sh);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * This function should share events between multiple ports of single IB
 * device. Currently it has no support under Windows.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	RTE_SET_USED(sh);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Read statistics by a named counter.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 * @param[in] ctr_name
 *   Pointer to the name of the statistic counter to read
 * @param[out] stat
 *   Pointer to read statistic value.
 * @return
 *   0 on success and stat is valid, 1 if failed to read the value
 *   rte_errno is set.
 */
int
mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
		      uint64_t *stat)
{
	RTE_SET_USED(priv);
	RTE_SET_USED(ctr_name);
	RTE_SET_USED(stat);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Flush device MAC addresses.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Remove a MAC address from device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
void
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(index);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
}

/**
 * Adds a MAC address to the device.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac
 *   MAC address to register.
 * @param index
 *   MAC address index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
		     uint32_t index)
{
	(void)index;
	struct rte_ether_addr lmac;

	if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			dev->data->port_id, strerror(rte_errno));
		return rte_errno;
	}
	if (!rte_is_same_ether_addr(&lmac, mac)) {
		DRV_LOG(ERR,
			"adding new mac address to device is unsupported");
		return -ENOTSUP;
	}
	return 0;
}

/**
 * Modify a VF MAC address.
 * Currently it has no support under Windows.
 *
 * @param priv
 *   Pointer to device private data.
 * @param mac_addr
 *   MAC address to modify into.
 * @param iface_idx
 *   Net device interface index.
 * @param vf_index
 *   VF index.
 *
 * @return
 *   0 on success, a negative errno value otherwise
 */
int
mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
			   unsigned int iface_idx,
			   struct rte_ether_addr *mac_addr,
			   int vf_index)
{
	RTE_SET_USED(priv);
	RTE_SET_USED(iface_idx);
	RTE_SET_USED(mac_addr);
	RTE_SET_USED(vf_index);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Set device promiscuous mode.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - promiscuous is disabled, otherwise - enabled
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(enable);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Set device allmulti mode.
 * Currently it has no support under Windows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param enable
 *   0 - all multicast is disabled, otherwise - enabled
 *
 * @return
 *   0 on success, a negative error value otherwise
 */
int
mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(enable);
	DRV_LOG(WARNING, "%s: is not supported", __func__);
	return -ENOTSUP;
}

/**
 * Detect if a devx_device_bdf object has identical DBDF values to the
 * rte_pci_addr found in bus/pci probing.
 *
 * @param[in] devx_bdf
 *   Pointer to the devx_device_bdf structure.
 * @param[in] addr
 *   Pointer to the rte_pci_addr structure.
 *
 * @return
 *   1 on Device match, 0 on mismatch.
 */
static int
mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf,
			    struct rte_pci_addr *addr)
{
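	/*
	 * devx_device_bdf packs the PCI domain in the upper bits of bus_id
	 * and the bus number in its low byte; device and function are kept
	 * in separate fields.
	 */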
	if (addr->domain != (devx_bdf->bus_id >> 8) ||
	    addr->bus != (devx_bdf->bus_id & 0xff) ||
	    addr->devid != devx_bdf->dev_id ||
	    addr->function != devx_bdf->fnc_id) {
		return 0;
	}
	return 1;
}

/**
 * Detect if a devx_device_bdf object matches the rte_pci_addr
 * found in bus/pci probing.
 * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF.
 *
 * @param[in] devx_bdf
 *   Pointer to the devx_device_bdf structure.
 * @param[in] addr
 *   Pointer to the rte_pci_addr structure.
 *
 * @return
 *   1 on Device match, 0 on mismatch, rte_errno code on failure.
 */
static int
mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
				struct rte_pci_addr *addr)
{
	int err;
	struct devx_device mlx5_dev;

	if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr))
		return 1;
	/*
	 * Didn't match on Native/PF BDF, could still
	 * match a VF BDF, check it next.
	 */
	err = mlx5_glue->query_device(devx_bdf, &mlx5_dev);
	if (err) {
		DRV_LOG(ERR, "query_device failed");
		rte_errno = err;
		return rte_errno;
	}
	if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr))
		return 1;
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs;
	/*
	 * Number of found IB Devices matching with requested PCI BDF.
	 * nd != 1 means there are multiple IB devices over the same
	 * PCI device and we have representors and master.
	 */
	unsigned int nd = 0;
	/*
	 * Number of found IB device Ports. nd = 1 and np = 1..n means
	 * we have the single multiport IB device, and there may be
	 * representors attached to some of found ports.
	 * Currently not supported.
	 * unsigned int np = 0;
	 */
	/*
	 * Number of DPDK ethernet devices to Spawn - either over
	 * multiple IB devices or multiple ports of single IB device.
	 * Actually this is the number of iterations to spawn.
	 */
	unsigned int ns = 0;
	/*
	 * Bonding device
	 *   < 0 - no bonding device (single one)
	 *   >= 0 - bonding device (value is slave PF index)
	 */
	int bd = -1;
	struct mlx5_dev_spawn_data *list = NULL;
	struct mlx5_dev_config dev_config;
	unsigned int dev_config_vf;
	int ret, err;
	uint32_t restore;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		DRV_LOG(ERR, "Secondary process is not supported on Windows.");
		return -ENOTSUP;
	}
	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		return -rte_errno;
	}
	devx_bdf_devs = mlx5_glue->get_device_list(&ret);
	orig_devx_bdf_devs = devx_bdf_devs;
	if (!devx_bdf_devs) {
		rte_errno = errno ? errno : ENOSYS;
		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * First scan the list of all Infiniband devices to find
	 * matching ones, gathering into the list.
	 */
	struct devx_device_bdf *devx_bdf_match[ret + 1];

	while (ret-- > 0) {
		err = mlx5_match_devx_devices_to_addr(devx_bdf_devs,
		    &pci_dev->addr);
		if (!err) {
			devx_bdf_devs++;
			continue;
		}
		if (err != 1) {
			ret = -err;
			goto exit;
		}
		devx_bdf_match[nd++] = devx_bdf_devs;
	}
	devx_bdf_match[nd] = NULL;
	if (!nd) {
		/* No device matches, just complain and bail out. */
		DRV_LOG(WARNING,
			"no DevX device matches PCI device " PCI_PRI_FMT ","
			" is DevX Configured?",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
		goto exit;
	}
	/*
	 * Now we can determine the maximal
	 * amount of devices to be spawned.
	 */
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(struct mlx5_dev_spawn_data),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!list) {
		DRV_LOG(ERR, "spawn data array allocation failure");
		rte_errno = ENOMEM;
		ret = -rte_errno;
		goto exit;
	}
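	/*
	 * Only a single spawn entry is filled below: representors and
	 * multiport devices are not supported on Windows yet.
	 */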
	memset(&list[ns].info, 0, sizeof(list[ns].info));
	list[ns].max_port = 1;
	list[ns].phys_port = 1;
	list[ns].phys_dev = devx_bdf_match[ns];
	list[ns].eth_dev = NULL;
	list[ns].pci_dev = pci_dev;
	list[ns].pf_bond = bd;
	list[ns].ifindex = -1; /* Spawn will assign */
	list[ns].info =
		(struct mlx5_switch_info){
			.master = 0,
			.representor = 0,
			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
			.port_name = 0,
			.switch_id = 0,
		};
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
		dev_config_vf = 1;
		break;
	default:
		dev_config_vf = 0;
		break;
	}
	/* Default configuration. */
	memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
	dev_config.vf = dev_config_vf;
	dev_config.dbnc = MLX5_ARG_UNSET;
	dev_config.rx_vec_en = 1;
	dev_config.txq_inline_max = MLX5_ARG_UNSET;
	dev_config.txq_inline_min = MLX5_ARG_UNSET;
	dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
	dev_config.txqs_inline = MLX5_ARG_UNSET;
	dev_config.vf_nl_en = 0;
	dev_config.mr_ext_memseg_en = 1;
	dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
	dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
	dev_config.dv_esw_en = 0;
	dev_config.dv_flow_en = 1;
	dev_config.decap_en = 0;
	dev_config.log_hp_size = MLX5_ARG_UNSET;
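	/*
	 * The defaults above keep the Windows-specific restrictions: no
	 * netlink (vf_nl_en), no E-Switch (dv_esw_en) and no decapsulation
	 * offload (decap_en), while DV flow remains mandatory for this port.
	 */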
	list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device,
					  &list[ns],
					  &dev_config);
	if (!list[ns].eth_dev)
		goto exit;
	restore = list[ns].eth_dev->data->dev_flags;
	rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev);
	/* Restore non-PCI flags cleared by the above call. */
	list[ns].eth_dev->data->dev_flags |= restore;
	rte_eth_dev_probing_finish(list[ns].eth_dev);
	ret = 0;
exit:
	/*
	 * Do the routine cleanup:
	 * - free allocated spawn data array
	 * - free the device list
	 */
	if (list)
		mlx5_free(list);
	MLX5_ASSERT(orig_devx_bdf_devs);
	mlx5_glue->free_device_list(orig_devx_bdf_devs);
	return ret;
}

/**
 * Set the reg_mr and dereg_mr callbacks.
 *
 * @param reg_mr_cb[out]
 *   Pointer to reg_mr func
 * @param dereg_mr_cb[out]
 *   Pointer to dereg_mr func
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
		      mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = mlx5_os_reg_mr;
	*dereg_mr_cb = mlx5_os_dereg_mr;
}

/**
 * Extract pdn of PD object using DevX.
 *
 * @param[in] pd
 *   Pointer to the DevX PD object.
 * @param[out] pdn
 *   Pointer to the PD object number variable.
 *
 * @return
 *   0 on success, error value otherwise.
 */
int
mlx5_os_get_pdn(void *pd, uint32_t *pdn)
{
	if (!pd)
		return -EINVAL;
	*pdn = ((struct mlx5_pd *)pd)->pdn;
	return 0;
}
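
/* Verbs flow engine is not implemented under Windows, keep its ops empty. */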
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};