1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
11 #include <rte_windows.h>
12 #include <rte_ethdev_pci.h>
14 #include <mlx5_glue.h>
15 #include <mlx5_devx_cmds.h>
16 #include <mlx5_common.h>
17 #include <mlx5_common_mp.h>
18 #include <mlx5_common_mr.h>
19 #include <mlx5_malloc.h>
21 #include "mlx5_defs.h"
23 #include "mlx5_common_os.h"
24 #include "mlx5_utils.h"
25 #include "mlx5_rxtx.h"
26 #include "mlx5_autoconf.h"
28 #include "mlx5_flow.h"
29 #include "mlx5_devx.h"
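/* Size of the array backing the flow-tag hash list. */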
31 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
33 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
35 /* Spinlock for mlx5_shared_data allocation. */
36 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
39  * Initialize shared data between primary and secondary processes.
41  * A memzone is reserved by the primary process and secondary processes attach to it.
45 * 0 on success, a negative errno value otherwise and rte_errno is set.
48 mlx5_init_shared_data(void)
50 const struct rte_memzone *mz;
53 rte_spinlock_lock(&mlx5_shared_data_lock);
54 if (mlx5_shared_data == NULL) {
55 /* Allocate shared memory. */
56 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
57 sizeof(*mlx5_shared_data),
61 "Cannot allocate mlx5 shared data");
65 mlx5_shared_data = mz->addr;
66 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
67 rte_spinlock_init(&mlx5_shared_data->lock);
70 rte_spinlock_unlock(&mlx5_shared_data_lock);
75 * PMD global initialization.
77 * Independent of any individual device, this function initializes global
78 * per-PMD data structures distinguishing primary and secondary processes.
79 * Hence, each initialization is called once per process.
82 * 0 on success, a negative errno value otherwise and rte_errno is set.
87 if (mlx5_init_shared_data())
93 * Get mlx5 device attributes.
96 * Pointer to device context.
99 * Pointer to mlx5 device attributes.
102 * 0 on success, a non-zero error number otherwise.
105 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
107 struct mlx5_context *mlx5_ctx;
108 struct mlx5_hca_attr hca_attr;
109 void *pv_iseg = NULL;
115 mlx5_ctx = (struct mlx5_context *)ctx;
116 memset(device_attr, 0, sizeof(*device_attr));
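/* HCA capabilities report most limits as log2 values; the shifts below convert them to absolute limits. */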
117 err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
119 DRV_LOG(ERR, "Failed to get device hca_cap");
122 device_attr->max_cq = 1 << hca_attr.log_max_cq;
123 device_attr->max_qp = 1 << hca_attr.log_max_qp;
124 device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
125 device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
126 device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
127 device_attr->max_pd = 1 << hca_attr.log_max_pd;
128 device_attr->max_srq = 1 << hca_attr.log_max_srq;
129 device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
130 if (hca_attr.rss_ind_tbl_cap) {
131 device_attr->max_rwq_indirection_table_size =
132 1 << hca_attr.rss_ind_tbl_cap;
134 pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
135 if (pv_iseg == NULL) {
136 DRV_LOG(ERR, "Failed to get device hca_iseg");
140 snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
141 MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
142 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
143 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
149 * Initialize DR related data within private structure.
150 * Routine checks the reference counter and does actual
151 * resources creation/initialization only if counter is zero.
154 * Pointer to the private device data structure.
157 * Zero on success, positive error code otherwise.
160 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
162 struct mlx5_dev_ctx_shared *sh = priv->sh;
166 err = mlx5_alloc_table_hash_list(priv);
168 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse\n",
169 (void *)sh->flow_tbls);
173 * Destroy DR related data within private structure.
176 * Pointer to the private device data structure.
179 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
181 mlx5_free_table_hash_list(priv);
185 * Set the completion channel file descriptor interrupt as non-blocking.
186 * Currently it has no support under Windows.
189 * Pointer to RQ channel object, which includes the channel fd
192 * The file descriptor (representing the interrupt) used in this channel.
195 * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
198 mlx5_os_set_nonblock_channel_fd(int fd)
201 DRV_LOG(WARNING, "%s: is not supported", __func__);
206 * Function API to open a device under Windows.
208 * This function calls the Windows glue APIs to open a device.
211 * Pointer to the device attributes (name, port, etc).
213 * Pointer to device configuration structure.
215 * Pointer to shared context structure.
218 * 0 on success, a positive error value otherwise.
221 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
222 const struct mlx5_dev_config *config,
223 struct mlx5_dev_ctx_shared *sh)
225 RTE_SET_USED(config);
227 struct mlx5_context *mlx5_ctx;
229 pthread_mutex_init(&sh->txpp.mutex, NULL);
230 /* Set numa node from pci probe */
231 sh->numa_node = spawn->pci_dev->device.numa_node;
233 /* Try to open device with DevX */
235 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
237 DRV_LOG(ERR, "open_device failed");
242 mlx5_ctx = (struct mlx5_context *)sh->ctx;
243 err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev);
245 DRV_LOG(ERR, "Failed to query device context fields.");
250 * DV flow counter mode detection and configuration.
253 * Pointer to rte_eth_dev structure.
257 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
259 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
260 struct mlx5_priv *priv = dev->data->dev_private;
261 struct mlx5_dev_ctx_shared *sh = priv->sh;
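/*
 * Fall back to basic counter management unless DevX async queries,
 * counter dump and bulk counter allocation are all available.
 */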
264 #ifndef HAVE_IBV_DEVX_ASYNC
268 if (!priv->config.devx || !priv->config.dv_flow_en ||
269 !priv->config.hca_attr.flow_counters_dump ||
270 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) ||
271 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
275 DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
276 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
277 priv->config.hca_attr.flow_counters_dump,
278 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap);
279 /* Initialize fallback mode only on the port that initializes sh. */
281 sh->cmng.counter_fallback = fallback;
282 else if (fallback != sh->cmng.counter_fallback)
283 DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
284 "than others:%d.", PORT_ID(priv), fallback);
289 * Spawn an Ethernet device from Verbs information.
292 * Backing DPDK device.
294 * Verbs device parameters (name, port, switch_info) to spawn.
296 * Device configuration parameters.
299 * A valid Ethernet device object on success, NULL otherwise and rte_errno
300 * is set. The following errors are defined:
302 * EEXIST: device is already spawned
304 static struct rte_eth_dev *
305 mlx5_dev_spawn(struct rte_device *dpdk_dev,
306 struct mlx5_dev_spawn_data *spawn,
307 struct mlx5_dev_config *config)
309 const struct mlx5_switch_info *switch_info = &spawn->info;
310 struct mlx5_dev_ctx_shared *sh = NULL;
311 struct mlx5_dev_attr device_attr;
312 struct rte_eth_dev *eth_dev = NULL;
313 struct mlx5_priv *priv = NULL;
315 unsigned int cqe_comp;
316 unsigned int cqe_pad = 0;
317 struct rte_ether_addr mac;
318 char name[RTE_ETH_NAME_MAX_LEN];
319 int own_domain_id = 0;
322 /* Build device name. */
323 strlcpy(name, dpdk_dev->name, sizeof(name));
324 /* check if the device is already spawned */
325 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
329 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
331 * Some parameters are needed in advance to create device context. We
332 * process the devargs here to get them, and later process devargs
333 * again to override some hardware settings.
335 err = mlx5_args(config, dpdk_dev->devargs);
338 DRV_LOG(ERR, "failed to process device arguments: %s",
339 strerror(rte_errno));
342 mlx5_malloc_mem_select(config->sys_mem_en);
343 sh = mlx5_alloc_shared_dev_ctx(spawn, config);
346 config->devx = sh->devx;
347 /* Initialize the shutdown event in mlx5_dev_spawn to
348 * support mlx5_is_removed for Windows.
350 err = mlx5_glue->devx_init_showdown_event(sh->ctx);
352 DRV_LOG(ERR, "failed to init shutdown event: %s",
356 DRV_LOG(DEBUG, "MPW isn't supported");
357 mlx5_os_get_dev_attr(sh->ctx, &device_attr);
359 config->ind_table_max_size =
360 sh->device_attr.max_rwq_indirection_table_size;
361 if (RTE_CACHE_LINE_SIZE == 128 &&
362 !(device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
366 config->cqe_comp = cqe_comp;
367 DRV_LOG(DEBUG, "tunnel offloading is not supported");
368 config->tunnel_en = 0;
369 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported");
371 /* Allocate private eth device data. */
372 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
374 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
376 DRV_LOG(ERR, "priv allocation failure");
381 priv->dev_port = spawn->phys_port;
382 priv->pci_dev = spawn->pci_dev;
383 priv->mtu = RTE_ETHER_MTU;
384 priv->mp_id.port_id = port_id;
385 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
386 priv->representor = !!switch_info->representor;
387 priv->master = !!switch_info->master;
388 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
389 priv->vport_meta_tag = 0;
390 priv->vport_meta_mask = 0;
391 priv->pf_bond = spawn->pf_bond;
393 /* representor_id field keeps the unmodified VF index. */
394 priv->representor_id = -1;
396 * Look for sibling devices in order to reuse their switch domain
397 * if any, otherwise allocate one.
399 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
400 const struct mlx5_priv *opriv =
401 rte_eth_devices[port_id].data->dev_private;
404 opriv->sh != priv->sh ||
406 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
408 priv->domain_id = opriv->domain_id;
411 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
412 err = rte_eth_switch_domain_alloc(&priv->domain_id);
415 DRV_LOG(ERR, "unable to allocate switch domain: %s",
416 strerror(rte_errno));
421 /* Override some values set by hardware configuration. */
422 mlx5_args(config, dpdk_dev->devargs);
423 err = mlx5_dev_check_sibling_config(priv, config);
426 config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
427 IBV_DEVICE_RAW_IP_CSUM);
428 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
429 (config->hw_csum ? "" : "not "));
430 DRV_LOG(DEBUG, "counters are not supported");
431 config->ind_table_max_size =
432 sh->device_attr.max_rwq_indirection_table_size;
434 * Remove this check once DPDK supports larger/variable
435 * indirection tables.
437 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
438 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
439 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
440 config->ind_table_max_size);
441 config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
442 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
443 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
444 (config->hw_vlan_strip ? "" : "not "));
445 config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
446 IBV_RAW_PACKET_CAP_SCATTER_FCS);
447 if (config->hw_padding) {
448 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
449 config->hw_padding = 0;
451 config->tso = (sh->device_attr.max_tso > 0 &&
452 (sh->device_attr.tso_supported_qpts &
453 (1 << IBV_QPT_RAW_PACKET)));
455 config->tso_max_payload_sz = sh->device_attr.max_tso;
456 DRV_LOG(DEBUG, "%sMPS is %s.",
457 config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
458 config->mps == MLX5_MPW ? "legacy " : "",
459 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
460 if (config->cqe_comp && !cqe_comp) {
461 DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
462 config->cqe_comp = 0;
464 if (config->cqe_pad && !cqe_pad) {
465 DRV_LOG(WARNING, "Rx CQE padding isn't supported.");
467 } else if (config->cqe_pad) {
468 DRV_LOG(INFO, "Rx CQE padding is enabled.");
471 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
476 /* Check relaxed ordering support. */
477 sh->cmng.relaxed_ordering_read = 0;
478 sh->cmng.relaxed_ordering_write = 0;
479 if (!haswell_broadwell_cpu) {
480 sh->cmng.relaxed_ordering_write =
481 config->hca_attr.relaxed_ordering_write;
482 sh->cmng.relaxed_ordering_read =
483 config->hca_attr.relaxed_ordering_read;
487 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
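/*
 * Detect real-time timestamp mode from the MTUTC register when user
 * register access is available; otherwise infer it from a 1 GHz
 * free-running device clock.
 */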
489 err = config->hca_attr.access_register_user ?
490 mlx5_devx_cmd_register_read
491 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
492 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
496 /* MTUTC register is read successfully. */
497 ts_mode = MLX5_GET(register_mtutc, reg,
499 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
500 config->rt_timestamp = 1;
502 /* Kernel does not support register reading. */
503 if (config->hca_attr.dev_freq_khz ==
504 (NS_PER_S / MS_PER_S))
505 config->rt_timestamp = 1;
508 if (config->mprq.enabled) {
509 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
510 config->mprq.enabled = 0;
512 if (config->max_dump_files_num == 0)
513 config->max_dump_files_num = 128;
514 eth_dev = rte_eth_dev_allocate(name);
515 if (eth_dev == NULL) {
516 DRV_LOG(ERR, "can not allocate rte ethdev");
520 if (priv->representor) {
521 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
522 eth_dev->data->representor_id = priv->representor_id;
525 * Store the associated network device interface index. This index
526 * is permanent throughout the lifetime of the device, so we may store
527 * the ifindex here and use the cached value later.
529 MLX5_ASSERT(spawn->ifindex);
530 priv->if_index = spawn->ifindex;
531 eth_dev->data->dev_private = priv;
532 priv->dev_data = eth_dev->data;
533 eth_dev->data->mac_addrs = priv->mac;
534 eth_dev->device = dpdk_dev;
535 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
536 /* Configure the first MAC address by default. */
537 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
539 "port %u cannot get MAC address, is mlx5_en"
540 " loaded? (errno: %s).",
541 eth_dev->data->port_id, strerror(rte_errno));
546 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
547 eth_dev->data->port_id,
548 mac.addr_bytes[0], mac.addr_bytes[1],
549 mac.addr_bytes[2], mac.addr_bytes[3],
550 mac.addr_bytes[4], mac.addr_bytes[5]);
551 #ifdef RTE_LIBRTE_MLX5_DEBUG
553 char ifname[MLX5_NAMESIZE];
555 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
556 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
557 eth_dev->data->port_id, ifname);
559 DRV_LOG(DEBUG, "port %u ifname is unknown.",
560 eth_dev->data->port_id);
563 /* Get actual MTU if possible. */
564 err = mlx5_get_mtu(eth_dev, &priv->mtu);
569 DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
571 /* Initialize burst functions to prevent crashes before link-up. */
572 eth_dev->rx_pkt_burst = removed_rx_burst;
573 eth_dev->tx_pkt_burst = removed_tx_burst;
574 eth_dev->dev_ops = &mlx5_dev_ops;
575 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
576 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
577 eth_dev->rx_queue_count = mlx5_rx_queue_count;
578 /* Register MAC address. */
579 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
581 priv->ctrl_flows = 0;
582 TAILQ_INIT(&priv->flow_meters);
583 TAILQ_INIT(&priv->flow_meter_profiles);
584 /* Bring Ethernet device up. */
585 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
586 eth_dev->data->port_id);
587 /* Netlink calls are unsupported - set the fds to -1 so the release path does not fail. */
588 priv->nl_socket_rdma = -1;
589 priv->nl_socket_route = -1;
590 mlx5_set_link_up(eth_dev);
592 * Even though the interrupt handler is not installed yet,
593 * interrupts will still trigger on the async_fd from
594 * Verbs context returned by ibv_open_device().
596 mlx5_link_update(eth_dev, 0);
597 config->dv_esw_en = 0;
598 /* Detect minimal data bytes to inline. */
599 mlx5_set_min_inline(spawn, config);
600 /* Store device configuration on private structure. */
601 priv->config = *config;
602 /* Create context for virtual machine VLAN workaround. */
603 priv->vmwa_context = NULL;
604 if (config->dv_flow_en) {
605 err = mlx5_alloc_shared_dr(priv);
609 /* Flow priority number detection is not supported. */
610 priv->config.flow_prio = -1;
611 if (!priv->config.dv_esw_en &&
612 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
613 DRV_LOG(WARNING, "metadata mode %u is not supported "
614 "(no E-Switch)", priv->config.dv_xmeta_en);
615 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
617 mlx5_set_metadata_mask(eth_dev);
618 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
619 !priv->sh->dv_regc0_mask) {
620 DRV_LOG(ERR, "metadata mode %u is not supported "
621 "(no metadata reg_c[0] is available).",
622 priv->config.dv_xmeta_en);
626 mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
629 mlx5_hrxq_remove_cb);
630 /* Query availability of metadata reg_c's. */
631 err = mlx5_flow_discover_mreg_c(eth_dev);
636 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
638 "port %u extensive metadata register is not supported.",
639 eth_dev->data->port_id);
640 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
641 DRV_LOG(ERR, "metadata mode %u is not supported "
642 "(no metadata registers available).",
643 priv->config.dv_xmeta_en);
648 if (config->devx && config->dv_flow_en) {
649 priv->obj_ops = devx_obj_ops;
651 DRV_LOG(ERR, "Flow mode %u is not supported "
652 "(Windows flow must be DevX with DV flow enabled).",
653 priv->config.dv_flow_en);
657 mlx5_flow_counter_mode_config(eth_dev);
662 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
665 eth_dev->data->dev_private = NULL;
667 if (eth_dev != NULL) {
668 /* mac_addrs must not be freed alone because it is part of dev_private. */
671 eth_dev->data->mac_addrs = NULL;
672 rte_eth_dev_release_port(eth_dev);
675 mlx5_free_shared_dev_ctx(sh);
676 MLX5_ASSERT(err > 0);
682 * This function should share events between multiple ports of a single IB
683 * device. Currently it has no support under Windows.
686 * Pointer to mlx5_dev_ctx_shared object.
689 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
692 DRV_LOG(WARNING, "%s: is not supported", __func__);
696 * This function should share events between multiple ports of a single IB
697 * device. Currently it has no support under Windows.
700 * Pointer to mlx5_dev_ctx_shared object.
703 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
706 DRV_LOG(WARNING, "%s: is not supported", __func__);
710 * Read statistics by a named counter.
713 * Pointer to the private device data structure.
714 * @param[in] ctr_name
715 * Pointer to the name of the statistic counter to read
717 * Pointer to read statistic value.
719 * 0 on success and stat is valid, 1 if failed to read the value.
724 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
728 RTE_SET_USED(ctr_name);
730 DRV_LOG(WARNING, "%s: is not supported", __func__);
735 * Flush device MAC addresses
736 * Currently it has no support under Windows.
739 * Pointer to Ethernet device structure.
743 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
746 DRV_LOG(WARNING, "%s: is not supported", __func__);
750 * Remove a MAC address from device
751 * Currently it has no support under Windows.
754 * Pointer to Ethernet device structure.
759 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
763 DRV_LOG(WARNING, "%s: is not supported", __func__);
767 * Adds a MAC address to the device
768 * Currently it has no support under Windows.
771 * Pointer to Ethernet device structure.
773 * MAC address to register.
778 * 0 on success, a negative errno value otherwise
781 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
785 struct rte_ether_addr lmac;
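/*
 * Only the MAC address already programmed on the device can be
 * "added"; configuring additional addresses is not supported here.
 */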
787 if (mlx5_get_mac(dev, &lmac.addr_bytes)) {
789 "port %u cannot get MAC address, is mlx5_en"
790 " loaded? (errno: %s)",
791 dev->data->port_id, strerror(rte_errno));
794 if (!rte_is_same_ether_addr(&lmac, mac)) {
796 "adding a new MAC address to the device is unsupported");
803 * Modify a VF MAC address
804 * Currently it has no support under Windows.
807 * Pointer to device private data.
809 * MAC address to modify into.
811 * Net device interface index
816 * 0 on success, a negative errno value otherwise
819 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
820 unsigned int iface_idx,
821 struct rte_ether_addr *mac_addr,
828 DRV_LOG(WARNING, "%s: is not supported", __func__);
833 * Set device promiscuous mode
834 * Currently it has no support under Windows.
837 * Pointer to Ethernet device structure.
839 * 0 - promiscuous is disabled, otherwise - enabled
842 * 0 on success, a negative error value otherwise
845 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
849 DRV_LOG(WARNING, "%s: is not supported", __func__);
854 * Set device allmulti mode
857 * Pointer to Ethernet device structure.
859 * 0 - all multicast is disabled, otherwise - enabled
862 * 0 on success, a negative error value otherwise
865 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
869 DRV_LOG(WARNING, "%s: is not supported", __func__);
874 * Detect if a devx_device_bdf object has identical DBDF values to the
875 * rte_pci_addr found in bus/pci probing.
877 * @param[in] devx_bdf
878 * Pointer to the devx_device_bdf structure.
880 * Pointer to the rte_pci_addr structure.
883 * 1 on Device match, 0 on mismatch.
886 mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf,
887 struct rte_pci_addr *addr)
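/*
 * The devx bus_id field packs the PCI domain in its upper bits and
 * the bus number in its low byte.
 */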
889 if (addr->domain != (devx_bdf->bus_id >> 8) ||
890 addr->bus != (devx_bdf->bus_id & 0xff) ||
891 addr->devid != devx_bdf->dev_id ||
892 addr->function != devx_bdf->fnc_id) {
899 * Detect if a devx_device_bdf object matches the rte_pci_addr
900 * found in bus/pci probing.
901 * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF.
903 * @param[in] devx_bdf
904 * Pointer to the devx_device_bdf structure.
906 * Pointer to the rte_pci_addr structure.
909 * 1 on Device match, 0 on mismatch, rte_errno code on failure.
912 mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
913 struct rte_pci_addr *addr)
916 struct devx_device mlx5_dev;
918 if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr))
921 * Didn't match on Native/PF BDF, could still
922 * match a VF BDF, check it next.
924 err = mlx5_glue->query_device(devx_bdf, &mlx5_dev);
926 DRV_LOG(ERR, "query_device failed");
930 if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr))
936 * DPDK callback to register a PCI device.
938 * This function spawns Ethernet devices out of a given PCI device.
941 * PCI driver structure (mlx5_driver).
943 * PCI device information.
946 * 0 on success, a negative errno value otherwise and rte_errno is set.
949 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
950 struct rte_pci_device *pci_dev)
952 struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs;
954 * Number of found IB devices matching the requested PCI BDF.
955 * nd != 1 means there are multiple IB devices over the same
956 * PCI device and we have representors and master.
960 * Number of found IB device Ports. nd = 1 and np = 1..n means
961 * we have the single multiport IB device, and there may be
962 * representors attached to some of found ports.
963 * Currently not supported.
964 * unsigned int np = 0;
968 * Number of DPDK Ethernet devices to spawn - either over
969 * multiple IB devices or multiple ports of a single IB device.
970 * Actually this is the number of iterations to spawn.
975 * < 0 - no bonding device (single one)
976 * >= 0 - bonding device (value is slave PF index)
979 struct mlx5_dev_spawn_data *list = NULL;
980 struct mlx5_dev_config dev_config;
981 unsigned int dev_config_vf;
985 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
986 DRV_LOG(ERR, "Secondary process is not supported on Windows.");
989 ret = mlx5_init_once();
991 DRV_LOG(ERR, "unable to init PMD global data: %s",
992 strerror(rte_errno));
996 devx_bdf_devs = mlx5_glue->get_device_list(&ret);
997 orig_devx_bdf_devs = devx_bdf_devs;
998 if (!devx_bdf_devs) {
999 rte_errno = errno ? errno : ENOSYS;
1000 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1004 * First scan the list of all Infiniband devices to find
1005 * matching ones, gathering them into the list.
1007 struct devx_device_bdf *devx_bdf_match[ret + 1];
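/* One extra slot keeps the match array NULL-terminated. */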
1010 err = mlx5_match_devx_devices_to_addr(devx_bdf_devs,
1020 devx_bdf_match[nd++] = devx_bdf_devs;
1022 devx_bdf_match[nd] = NULL;
1024 /* No device matches, just complain and bail out. */
1026 "no DevX device matches PCI device " PCI_PRI_FMT ","
1027 " is DevX Configured?",
1028 pci_dev->addr.domain, pci_dev->addr.bus,
1029 pci_dev->addr.devid, pci_dev->addr.function);
1035 * Now we can determine the maximal
1036 * number of devices to be spawned.
1038 list = mlx5_malloc(MLX5_MEM_ZERO,
1039 sizeof(struct mlx5_dev_spawn_data),
1040 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1042 DRV_LOG(ERR, "spawn data array allocation failure");
1047 memset(&list[ns].info, 0, sizeof(list[ns].info));
1048 list[ns].max_port = 1;
1049 list[ns].phys_port = 1;
1050 list[ns].phys_dev = devx_bdf_match[ns];
1051 list[ns].eth_dev = NULL;
1052 list[ns].pci_dev = pci_dev;
1053 list[ns].pf_bond = bd;
1054 list[ns].ifindex = -1; /* Spawn will assign */
1056 (struct mlx5_switch_info){
1059 .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
1063 /* Device specific configuration. */
1064 switch (pci_dev->id.device_id) {
1065 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1066 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1067 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1068 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1069 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
1070 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
1071 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
1078 /* Default configuration. */
1079 memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
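/*
 * MLX5_ARG_UNSET marks parameters not explicitly configured; they keep
 * driver defaults or are derived later from device capabilities.
 */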
1080 dev_config.vf = dev_config_vf;
1082 dev_config.dbnc = MLX5_ARG_UNSET;
1083 dev_config.rx_vec_en = 1;
1084 dev_config.txq_inline_max = MLX5_ARG_UNSET;
1085 dev_config.txq_inline_min = MLX5_ARG_UNSET;
1086 dev_config.txq_inline_mpw = MLX5_ARG_UNSET;
1087 dev_config.txqs_inline = MLX5_ARG_UNSET;
1088 dev_config.vf_nl_en = 0;
1089 dev_config.mr_ext_memseg_en = 1;
1090 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
1091 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
1092 dev_config.dv_esw_en = 0;
1093 dev_config.dv_flow_en = 1;
1094 dev_config.decap_en = 0;
1095 dev_config.log_hp_size = MLX5_ARG_UNSET;
1096 list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device,
1099 if (!list[ns].eth_dev)
1101 restore = list[ns].eth_dev->data->dev_flags;
1102 rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev);
1103 /* Restore non-PCI flags cleared by the above call. */
1104 list[ns].eth_dev->data->dev_flags |= restore;
1105 rte_eth_dev_probing_finish(list[ns].eth_dev);
1109 * Do the routine cleanup:
1110 * - free allocated spawn data array
1111 * - free the device list
1115 MLX5_ASSERT(orig_devx_bdf_devs);
1116 mlx5_glue->free_device_list(orig_devx_bdf_devs);
1121 * Set the reg_mr and dereg_mr callbacks.
1123 * @param[out] reg_mr_cb
1124 * Pointer to reg_mr func
1125 * @param[out] dereg_mr_cb
1126 * Pointer to dereg_mr func
1130 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
1131 mlx5_dereg_mr_t *dereg_mr_cb)
1133 *reg_mr_cb = mlx5_os_reg_mr;
1134 *dereg_mr_cb = mlx5_os_dereg_mr;
1138 * Extract pdn of PD object using DevX
1141 * Pointer to the DevX PD object.
1143 * Pointer to the PD object number variable.
1146 * 0 on success, error value otherwise.
1149 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
1154 *pdn = ((struct mlx5_pd *)pd)->pdn;
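/* The Verbs flow engine is not available on Windows, so its driver ops table stays empty. */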
1158 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};