net/mlx5: support yellow meter action for hierarchy tag rule
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 7bcce9d..77f04cc 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -9,6 +9,7 @@
 #include <stdlib.h>
 
 #include <rte_windows.h>
+#include <ethdev_pci.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
 #include "mlx5_common_os.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
 #include "mlx5_flow.h"
+#include "mlx5_devx.h"
+
+static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+
+/* Spinlock for mlx5_shared_data allocation. */
+static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* rte flow indexed pool configuration. */
+static struct mlx5_indexed_pool_config icfg[] = {
+       {
+               .size = sizeof(struct rte_flow),
+               .trunk_size = 64,
+               .need_lock = 1,
+               .release_mem_en = 0,
+               .malloc = mlx5_malloc,
+               .free = mlx5_free,
+               .per_core_cache = 0,
+               .type = "ctl_flow_ipool",
+       },
+       {
+               .size = sizeof(struct rte_flow),
+               .trunk_size = 64,
+               .grow_trunk = 3,
+               .grow_shift = 2,
+               .need_lock = 1,
+               .release_mem_en = 0,
+               .malloc = mlx5_malloc,
+               .free = mlx5_free,
+               .per_core_cache = 1 << 14,
+               .type = "rte_flow_ipool",
+       },
+       {
+               .size = sizeof(struct rte_flow),
+               .trunk_size = 64,
+               .grow_trunk = 3,
+               .grow_shift = 2,
+               .need_lock = 1,
+               .release_mem_en = 0,
+               .malloc = mlx5_malloc,
+               .free = mlx5_free,
+               .per_core_cache = 0,
+               .type = "mcp_flow_ipool",
+       },
+};
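
Note: these three configurations are consumed later in mlx5_dev_spawn(), one
per MLX5_FLOW_TYPE_* index. A minimal lifecycle sketch, assuming the
mlx5_ipool_*() helpers declared in mlx5_utils.h:

    /* Sketch only: take one rte_flow entry from the "rte_flow_ipool"
     * configuration, then release it. The returned idx (not the pointer)
     * is the stable handle the PMD stores, so entries survive trunk growth.
     */
    struct mlx5_indexed_pool *pool = mlx5_ipool_create(&icfg[1]);
    uint32_t idx = 0;
    struct rte_flow *flow = mlx5_ipool_zmalloc(pool, &idx);

    if (flow != NULL)
            mlx5_ipool_free(pool, idx);
    mlx5_ipool_destroy(pool);
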
+
+static void
+mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       void *ctx = priv->sh->cdev->ctx;
+
+       priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
+       if (!priv->q_counters) {
+               DRV_LOG(ERR, "Port %d queue counter object cannot be created "
+                       "by DevX - imissed counter will be unavailable",
+                       dev->data->port_id);
+               return;
+       }
+       priv->counter_set_id = priv->q_counters->id;
+}
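
Note: the counter set id allocated here is attached to the DevX Rx queues,
and the out_of_buffer value it accumulates is what mlx5_os_read_dev_stat()
further down reports as the imissed statistic. From an application it
surfaces through the generic stats API; a sketch (port_id is assumed to be
a started mlx5 port):

    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Read imissed, backed by the DevX queue counter allocated above
     * (unavailable if mlx5_devx_cmd_queue_counter_alloc() failed).
     */
    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) == 0)
            printf("imissed: %" PRIu64 "\n", stats.imissed);
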
+
+/**
+ * Initialize shared data between primary and secondary process.
+ *
+ * A memzone is reserved by the primary process and secondary processes
+ * attach to it.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_shared_data(void)
+{
+       const struct rte_memzone *mz;
+       int ret = 0;
+
+       rte_spinlock_lock(&mlx5_shared_data_lock);
+       if (mlx5_shared_data == NULL) {
+               /* Allocate shared memory. */
+               mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
+                                        sizeof(*mlx5_shared_data),
+                                        SOCKET_ID_ANY, 0);
+               if (mz == NULL) {
+                       DRV_LOG(ERR,
+                               "Cannot allocate mlx5 shared data");
+                       ret = -rte_errno;
+                       goto error;
+               }
+               mlx5_shared_data = mz->addr;
+               memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
+               rte_spinlock_init(&mlx5_shared_data->lock);
+       }
+error:
+       rte_spinlock_unlock(&mlx5_shared_data_lock);
+       return ret;
+}
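
Note: secondary processes are rejected on Windows (see mlx5_os_net_probe()
below), so only the reserve path runs here. For reference, the attach side
described in the comment would look roughly like this in a secondary
process (a sketch mirroring the Linux PMD):

    /* Attach to the memzone reserved by the primary process. */
    const struct rte_memzone *mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);

    if (mz == NULL) {
            /* The primary process has not reserved the zone yet. */
            rte_errno = ENOENT;
            return -rte_errno;
    }
    mlx5_shared_data = mz->addr;
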
 
 /**
- * Get mlx5 device attributes.
+ * PMD global initialization.
+ *
+ * Independent of any individual device, this function initializes global
+ * per-PMD data structures, distinguishing primary and secondary processes.
+ * Hence, it is called once per process.
  *
- * @param ctx
- *   Pointer to device context.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_once(void)
+{
+       if (mlx5_init_shared_data())
+               return -rte_errno;
+       return 0;
+}
+
+/**
+ * Get mlx5 device capabilities.
  *
- * @param device_attr
- *   Pointer to mlx5 device attributes.
+ * @param sh
+ *   Pointer to shared device context.
  *
  * @return
- *   0 on success, non zero error number otherwise
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 {
-       struct mlx5_context *mlx5_ctx;
-       struct mlx5_hca_attr hca_attr;
+       struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+       struct mlx5_context *mlx5_ctx = sh->cdev->ctx;
        void *pv_iseg = NULL;
        u32 cb_iseg = 0;
-       int err = 0;
 
-       if (!ctx)
-               return -EINVAL;
-       mlx5_ctx = (struct mlx5_context *)ctx;
-       memset(device_attr, 0, sizeof(*device_attr));
-       err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr);
-       if (err) {
-               DRV_LOG(ERR, "Failed to get device hca_cap");
-               return err;
-       }
-       device_attr->max_cq = 1 << hca_attr.log_max_cq;
-       device_attr->max_qp = 1 << hca_attr.log_max_qp;
-       device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz;
-       device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz;
-       device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz;
-       device_attr->max_pd = 1 << hca_attr.log_max_pd;
-       device_attr->max_srq = 1 << hca_attr.log_max_srq;
-       device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz;
-       if (hca_attr.rss_ind_tbl_cap) {
-               device_attr->max_rwq_indirection_table_size =
-                       1 << hca_attr.rss_ind_tbl_cap;
-       }
+       MLX5_ASSERT(sh->cdev->config.devx);
+       MLX5_ASSERT(mlx5_dev_is_pci(sh->cdev->dev));
        pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
        if (pv_iseg == NULL) {
-               DRV_LOG(ERR, "Failed to get device hca_iseg");
-               return errno;
+               DRV_LOG(ERR, "Failed to get device hca_iseg.");
+               rte_errno = errno;
+               return -rte_errno;
        }
-       if (!err) {
-               snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
-                       MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
-                       MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
-                       MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
+       memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+       sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(sh->cdev->dev));
+       sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
+       sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
+       sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
+       sh->dev_cap.dv_flow_en = 1;
+       sh->dev_cap.mps = MLX5_MPW_DISABLED;
+       DRV_LOG(DEBUG, "MPW isn't supported.");
+       DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported.");
+       sh->dev_cap.hw_csum = hca_attr->csum_cap;
+       DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
+               (sh->dev_cap.hw_csum ? "" : "not "));
+       sh->dev_cap.hw_vlan_strip = hca_attr->vlan_cap;
+       DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
+               (sh->dev_cap.hw_vlan_strip ? "" : "not "));
+       sh->dev_cap.hw_fcs_strip = hca_attr->scatter_fcs;
+       sh->dev_cap.tso = ((1 << hca_attr->max_lso_cap) > 0);
+       if (sh->dev_cap.tso)
+               sh->dev_cap.tso_max_payload_sz = 1 << hca_attr->max_lso_cap;
+       DRV_LOG(DEBUG, "Counters are not supported.");
+       if (hca_attr->rss_ind_tbl_cap) {
+               /*
+                * DPDK doesn't support larger/variable indirection tables.
+                * Once DPDK supports it, take max size from device attr.
+                */
+               sh->dev_cap.ind_table_max_size =
+                       RTE_MIN(1 << hca_attr->rss_ind_tbl_cap,
+                               (unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
+               DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
+                       sh->dev_cap.ind_table_max_size);
        }
+       sh->dev_cap.swp = mlx5_get_supported_sw_parsing_offloads(hca_attr);
+       sh->dev_cap.tunnel_en = mlx5_get_supported_tunneling_offloads(hca_attr);
+       if (sh->dev_cap.tunnel_en) {
+               DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
+                       sh->dev_cap.tunnel_en &
+                       MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
+                       sh->dev_cap.tunnel_en &
+                       MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
+                       sh->dev_cap.tunnel_en &
+                       MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
+       } else {
+               DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
+       }
+       snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
+                MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
+                MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
+                MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
+       DRV_LOG(DEBUG, "Packet pacing is not supported.");
+       mlx5_rt_timestamp_config(sh, hca_attr);
+       return 0;
+}
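
Note: later configuration paths branch on the sh->dev_cap fields populated
above. Purely as an illustration (not the PMD's exact code), mapping a few
of them onto ethdev Rx offload flags could look like:

    /* Illustrative mapping of probed capabilities to Rx offload flags. */
    uint64_t rx_offloads = 0;

    if (sh->dev_cap.hw_csum)
            rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
    if (sh->dev_cap.hw_vlan_strip)
            rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
    if (sh->dev_cap.hw_fcs_strip)
            rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
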
+
+/**
+ * Initialize DR related data within private structure.
+ * The routine checks the reference counter and performs the actual
+ * resource creation/initialization only if the counter is zero.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   Zero on success, positive error code otherwise.
+ */
+static int
+mlx5_alloc_shared_dr(struct mlx5_priv *priv)
+{
+       struct mlx5_dev_ctx_shared *sh = priv->sh;
+       int err = 0;
+
+       if (!sh->flow_tbls)
+               err = mlx5_alloc_table_hash_list(priv);
+       else
+               DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
+                       (void *)sh->flow_tbls);
        return err;
 }
+/**
+ * Destroy DR related data within private structure.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ */
+void
+mlx5_os_free_shared_dr(struct mlx5_priv *priv)
+{
+       mlx5_free_table_hash_list(priv);
+}
 
 /**
  * Set the completion channel file descriptor interrupt as non-blocking.
@@ -90,7 +264,7 @@ mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
  *   Pointer to RQ channel object, which includes the channel fd
  *
  * @param[out] fd
- *   The file descriptor (representing the intetrrupt) used in this channel.
+ *   The file descriptor (representing the interrupt) used in this channel.
  *
  * @return
  *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
@@ -103,6 +277,303 @@ mlx5_os_set_nonblock_channel_fd(int fd)
        return -ENOTSUP;
 }
 
+/**
+ * Spawn an Ethernet device from DevX information.
+ *
+ * @param dpdk_dev
+ *   Backing DPDK device.
+ * @param spawn
+ *   Verbs device parameters (name, port, switch_info) to spawn.
+ * @param mkvlist
+ *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
+ *
+ * @return
+ *   A valid Ethernet device object on success, NULL otherwise and rte_errno
+ *   is set. The following errors are defined:
+ *
+ *   EEXIST: device is already spawned
+ */
+static struct rte_eth_dev *
+mlx5_dev_spawn(struct rte_device *dpdk_dev,
+              struct mlx5_dev_spawn_data *spawn,
+              struct mlx5_kvargs_ctrl *mkvlist)
+{
+       const struct mlx5_switch_info *switch_info = &spawn->info;
+       struct mlx5_dev_ctx_shared *sh = NULL;
+       struct rte_eth_dev *eth_dev = NULL;
+       struct mlx5_priv *priv = NULL;
+       int err = 0;
+       struct rte_ether_addr mac;
+       char name[RTE_ETH_NAME_MAX_LEN];
+       int own_domain_id = 0;
+       uint16_t port_id;
+       int i;
+
+       /* Build device name. */
+       strlcpy(name, dpdk_dev->name, sizeof(name));
+       /* Check if the device is already spawned. */
+       if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
+               rte_errno = EEXIST;
+               return NULL;
+       }
+       DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
+       sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
+       if (!sh)
+               return NULL;
+       if (!sh->config.dv_flow_en) {
+               DRV_LOG(ERR, "Windows flow mode must be DV flow enable.");
+               err = ENOTSUP;
+               goto error;
+       }
+       if (sh->config.vf_nl_en) {
+               DRV_LOG(DEBUG, "VF netlink isn't supported.");
+               sh->config.vf_nl_en = 0;
+       }
+       /* Initialize the shutdown event in mlx5_dev_spawn to
+        * support mlx5_is_removed for Windows.
+        */
+       err = mlx5_glue->devx_init_showdown_event(sh->cdev->ctx);
+       if (err) {
+               DRV_LOG(ERR, "failed to init showdown event: %s",
+                       strerror(errno));
+               goto error;
+       }
+       /* Allocate private eth device data. */
+       priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+                          sizeof(*priv),
+                          RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+       if (priv == NULL) {
+               DRV_LOG(ERR, "priv allocation failure");
+               err = ENOMEM;
+               goto error;
+       }
+       priv->sh = sh;
+       priv->dev_port = spawn->phys_port;
+       priv->pci_dev = spawn->pci_dev;
+       priv->mtu = RTE_ETHER_MTU;
+       priv->mp_id.port_id = port_id;
+       strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
+       priv->representor = !!switch_info->representor;
+       priv->master = !!switch_info->master;
+       priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+       priv->vport_meta_tag = 0;
+       priv->vport_meta_mask = 0;
+       priv->pf_bond = spawn->pf_bond;
+       priv->vport_id = -1;
+       /* representor_id field keeps the unmodified VF index. */
+       priv->representor_id = -1;
+       /*
+        * Look for sibling devices in order to reuse their switch domain
+        * if any, otherwise allocate one.
+        */
+       MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
+               const struct mlx5_priv *opriv =
+                       rte_eth_devices[port_id].data->dev_private;
+
+               if (!opriv ||
+                   opriv->sh != priv->sh ||
+                   opriv->domain_id ==
+                   RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+                       continue;
+               priv->domain_id = opriv->domain_id;
+               break;
+       }
+       if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+               err = rte_eth_switch_domain_alloc(&priv->domain_id);
+               if (err) {
+                       err = rte_errno;
+                       DRV_LOG(ERR, "unable to allocate switch domain: %s",
+                               strerror(rte_errno));
+                       goto error;
+               }
+               own_domain_id = 1;
+       }
+       /* Process parameters and store port configuration in the priv structure. */
+       err = mlx5_port_args_config(priv, mkvlist, &priv->config);
+       if (err) {
+               err = rte_errno;
+               DRV_LOG(ERR, "Failed to process port configure: %s",
+                       strerror(rte_errno));
+               goto error;
+       }
+       eth_dev = rte_eth_dev_allocate(name);
+       if (eth_dev == NULL) {
+               DRV_LOG(ERR, "can not allocate rte ethdev");
+               err = ENOMEM;
+               goto error;
+       }
+       if (priv->representor) {
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+               eth_dev->data->representor_id = priv->representor_id;
+               MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
+                       struct mlx5_priv *opriv =
+                               rte_eth_devices[port_id].data->dev_private;
+                       if (opriv &&
+                           opriv->master &&
+                           opriv->domain_id == priv->domain_id &&
+                           opriv->sh == priv->sh) {
+                               eth_dev->data->backer_port_id = port_id;
+                               break;
+                       }
+               }
+               if (port_id >= RTE_MAX_ETHPORTS)
+                       eth_dev->data->backer_port_id = eth_dev->data->port_id;
+       }
+       /*
+        * Store the associated network device interface index. This index
+        * is permanent throughout the lifetime of the device, so it may be
+        * cached here and reused later.
+        */
+       MLX5_ASSERT(spawn->ifindex);
+       priv->if_index = spawn->ifindex;
+       eth_dev->data->dev_private = priv;
+       priv->dev_data = eth_dev->data;
+       eth_dev->data->mac_addrs = priv->mac;
+       eth_dev->device = dpdk_dev;
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+       /* Configure the first MAC address by default. */
+       if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
+               DRV_LOG(ERR,
+                       "port %u cannot get MAC address, is mlx5_en"
+                       " loaded? (errno: %s).",
+                       eth_dev->data->port_id, strerror(rte_errno));
+               err = ENODEV;
+               goto error;
+       }
+       DRV_LOG(INFO,
+               "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
+               eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+       {
+               char ifname[MLX5_NAMESIZE];
+
+               if (mlx5_get_ifname(eth_dev, &ifname) == 0)
+                       DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+                               eth_dev->data->port_id, ifname);
+               else
+                       DRV_LOG(DEBUG, "port %u ifname is unknown.",
+                               eth_dev->data->port_id);
+       }
+#endif
+       /* Get actual MTU if possible. */
+       err = mlx5_get_mtu(eth_dev, &priv->mtu);
+       if (err) {
+               err = rte_errno;
+               goto error;
+       }
+       DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id,
+               priv->mtu);
+       /* Initialize burst functions to prevent crashes before link-up. */
+       eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+       eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+       eth_dev->dev_ops = &mlx5_dev_ops;
+       eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
+       eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
+       eth_dev->rx_queue_count = mlx5_rx_queue_count;
+       /* Register MAC address. */
+       claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
+       priv->ctrl_flows = 0;
+       TAILQ_INIT(&priv->flow_meters);
+       priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
+       if (!priv->mtr_profile_tbl)
+               goto error;
+       /* Bring Ethernet device up. */
+       DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.",
+               eth_dev->data->port_id);
+       /* Netlink calls are unsupported - set to -1 so release does not fail. */
+       priv->nl_socket_rdma = -1;
+       priv->nl_socket_route = -1;
+       mlx5_set_link_up(eth_dev);
+       /*
+        * Even though the interrupt handler is not installed yet,
+        * interrupts will still trigger on the async_fd from
+        * Verbs context returned by ibv_open_device().
+        */
+       mlx5_link_update(eth_dev, 0);
+       for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+               icfg[i].release_mem_en = !!sh->config.reclaim_mode;
+               if (sh->config.reclaim_mode)
+                       icfg[i].per_core_cache = 0;
+               priv->flows[i] = mlx5_ipool_create(&icfg[i]);
+               if (!priv->flows[i])
+                       goto error;
+       }
+       /* Create context for virtual machine VLAN workaround. */
+       priv->vmwa_context = NULL;
+       if (sh->config.dv_flow_en) {
+               err = mlx5_alloc_shared_dr(priv);
+               if (err)
+                       goto error;
+       }
+       /* Flow priority number detection is not supported. */
+       priv->sh->flow_max_priority = -1;
+       mlx5_set_metadata_mask(eth_dev);
+       if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+           !priv->sh->dv_regc0_mask) {
+               DRV_LOG(ERR, "metadata mode %u is not supported "
+                            "(no metadata reg_c[0] is available).",
+                            sh->config.dv_xmeta_en);
+               err = ENOTSUP;
+               goto error;
+       }
+       priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+               mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
+               mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb,
+               mlx5_hrxq_clone_free_cb);
+       /* Query availability of metadata reg_c's. */
+       if (!priv->sh->metadata_regc_check_flag) {
+               err = mlx5_flow_discover_mreg_c(eth_dev);
+               if (err < 0) {
+                       err = -err;
+                       goto error;
+               }
+       }
+       if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
+               DRV_LOG(DEBUG,
+                       "port %u extensive metadata register is not supported.",
+                       eth_dev->data->port_id);
+               if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+                       DRV_LOG(ERR, "metadata mode %u is not supported "
+                                    "(no metadata registers available).",
+                                    sh->config.dv_xmeta_en);
+                       err = ENOTSUP;
+                       goto error;
+               }
+       }
+       if (sh->cdev->config.devx) {
+               priv->obj_ops = devx_obj_ops;
+       } else {
+               DRV_LOG(ERR, "Windows flow must be DevX.");
+               err = ENOTSUP;
+               goto error;
+       }
+       mlx5_flow_counter_mode_config(eth_dev);
+       mlx5_queue_counter_id_prepare(eth_dev);
+       return eth_dev;
+error:
+       if (priv) {
+               if (priv->mtr_profile_tbl)
+                       mlx5_l3t_destroy(priv->mtr_profile_tbl);
+               if (own_domain_id)
+                       claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+               mlx5_free(priv);
+               if (eth_dev != NULL)
+                       eth_dev->data->dev_private = NULL;
+       }
+       if (eth_dev != NULL) {
+               /*
+                * mac_addrs must not be freed alone because it is
+                * part of dev_private.
+                */
+               eth_dev->data->mac_addrs = NULL;
+               rte_eth_dev_release_port(eth_dev);
+       }
+       if (sh)
+               mlx5_free_shared_dev_ctx(sh);
+       MLX5_ASSERT(err > 0);
+       rte_errno = err;
+       return NULL;
+}
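
Note: every failure funnels through the error label, which asserts a
positive err and mirrors it into rte_errno, so callers only need the NULL
check. A sketch of the expected calling pattern (essentially what
mlx5_os_net_probe() does below):

    struct rte_eth_dev *eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, mkvlist);

    if (eth_dev == NULL) {
            /* rte_errno holds the positive errno set at the error label. */
            DRV_LOG(ERR, "device spawn failed: %s", strerror(rte_errno));
            return -rte_errno;
    }
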
+
 /**
  * This function should share events between multiple ports of single IB
  * device.  Currently it has no support under Windows.
@@ -141,7 +612,8 @@ mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
  * @param[out] stat
  *   Pointer to read statistic value.
  * @return
- *   0 on success and stat is valud, 1 if failed to read the value
+ *   0 on success and stat is valid, non-zero if failed to read the value
+ *   or counter is not supported.
  *   rte_errno is set.
  *
  */
@@ -149,10 +621,11 @@ int
 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
                      uint64_t *stat)
 {
-       RTE_SET_USED(priv);
-       RTE_SET_USED(ctr_name);
-       RTE_SET_USED(stat);
-       DRV_LOG(WARNING, "%s: is not supported", __func__);
+       if (priv->q_counters != NULL && strcmp(ctr_name, "out_of_buffer") == 0)
+               return mlx5_devx_cmd_queue_counter_query
+                               (priv->q_counters, 0, (uint32_t *)stat);
+       DRV_LOG(WARNING, "%s: is not supported for the %s counter",
+               __func__, ctr_name);
        return -ENOTSUP;
 }
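
Note: mlx5_devx_cmd_queue_counter_query() writes through a uint32_t cast,
so only the low 32 bits of *stat are updated; callers should zero the
destination first. A hedged internal usage sketch:

    /* Query the only counter supported on Windows. Zero-initialize the
     * destination because the DevX query fills just its low 32 bits.
     */
    uint64_t oob = 0;

    if (mlx5_os_read_dev_stat(priv, "out_of_buffer", &oob) == 0)
            DRV_LOG(DEBUG, "out_of_buffer: %" PRIu64, oob);
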
 
@@ -256,7 +729,6 @@ mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
 
 /**
  * Set device promiscuous mode
- * Currently it has no support under Windows.
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -269,10 +741,9 @@ mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
 int
 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
 {
-       (void)dev;
-       (void)enable;
-       DRV_LOG(WARNING, "%s: is not supported", __func__);
-       return -ENOTSUP;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       return mlx5_glue->devx_set_promisc_vport(priv->sh->cdev->ctx, ALL_PROMISC, enable);
 }
 
 /**
@@ -289,118 +760,71 @@ mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
 int
 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
 {
-       (void)dev;
-       (void)enable;
-       DRV_LOG(WARNING, "%s: is not supported", __func__);
-       return -ENOTSUP;
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       return mlx5_glue->devx_set_promisc_vport(priv->sh->cdev->ctx, MC_PROMISC, enable);
 }
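
Note: both callbacks now map onto DevX vport promiscuous settings, so the
standard ethdev entry points take effect on Windows; for example:

    /* Application side: these reach the callbacks above through
     * mlx5_promiscuous_enable() / mlx5_allmulticast_enable().
     */
    rte_eth_promiscuous_enable(port_id);   /* ALL_PROMISC, enable = 1 */
    rte_eth_allmulticast_enable(port_id);  /* MC_PROMISC, enable = 1 */
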
 
-const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
+/**
+ * DPDK callback to register a PCI device.
+ *
+ * This function spawns Ethernet devices out of a given device.
+ *
+ * @param[in] cdev
+ *   Pointer to the common device.
+ * @param[in, out] mkvlist
+ *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_os_net_probe(struct mlx5_common_device *cdev,
+                 struct mlx5_kvargs_ctrl *mkvlist)
+{
+       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
+       struct mlx5_dev_spawn_data spawn = {
+               .pf_bond = -1,
+               .max_port = 1,
+               .phys_port = 1,
+               .phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx),
+               .pci_dev = pci_dev,
+               .cdev = cdev,
+               .ifindex = -1, /* Spawn will assign */
+               .info = (struct mlx5_switch_info){
+                       .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
+               },
+       };
+       int ret;
+       uint32_t restore;
 
-const struct eth_dev_ops mlx5_os_dev_ops = {
-       .dev_configure = mlx5_dev_configure,
-       .dev_start = mlx5_dev_start,
-       .dev_stop = mlx5_dev_stop,
-       .dev_close = mlx5_dev_close,
-       .mtu_set = mlx5_dev_set_mtu,
-       .link_update = mlx5_link_update,
-       .stats_get = mlx5_stats_get,
-       .stats_reset = mlx5_stats_reset,
-       .dev_infos_get = mlx5_dev_infos_get,
-       .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
-       .promiscuous_enable = mlx5_promiscuous_enable,
-       .promiscuous_disable = mlx5_promiscuous_disable,
-       .allmulticast_enable = mlx5_allmulticast_enable,
-       .allmulticast_disable = mlx5_allmulticast_disable,
-       .xstats_get = mlx5_xstats_get,
-       .xstats_reset = mlx5_xstats_reset,
-       .xstats_get_names = mlx5_xstats_get_names,
-       .fw_version_get = mlx5_fw_version_get,
-       .read_clock = mlx5_read_clock,
-       .vlan_filter_set = mlx5_vlan_filter_set,
-       .rx_queue_setup = mlx5_rx_queue_setup,
-       .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
-       .tx_queue_setup = mlx5_tx_queue_setup,
-       .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
-       .rx_queue_release = mlx5_rx_queue_release,
-       .tx_queue_release = mlx5_tx_queue_release,
-       .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
-       .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
-       .mac_addr_remove = mlx5_mac_addr_remove,
-       .mac_addr_add = mlx5_mac_addr_add,
-       .mac_addr_set = mlx5_mac_addr_set,
-       .set_mc_addr_list = mlx5_set_mc_addr_list,
-       .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
-       .vlan_offload_set = mlx5_vlan_offload_set,
-       .reta_update = mlx5_dev_rss_reta_update,
-       .reta_query = mlx5_dev_rss_reta_query,
-       .rss_hash_update = mlx5_rss_hash_update,
-       .rss_hash_conf_get = mlx5_rss_hash_conf_get,
-       .filter_ctrl = mlx5_dev_filter_ctrl,
-       .rxq_info_get = mlx5_rxq_info_get,
-       .txq_info_get = mlx5_txq_info_get,
-       .rx_burst_mode_get = mlx5_rx_burst_mode_get,
-       .tx_burst_mode_get = mlx5_tx_burst_mode_get,
-       .rx_queue_intr_enable = mlx5_rx_intr_enable,
-       .rx_queue_intr_disable = mlx5_rx_intr_disable,
-       .is_removed = mlx5_is_removed,
-       .udp_tunnel_port_add  = mlx5_udp_tunnel_port_add,
-       .get_module_info = mlx5_get_module_info,
-       .get_module_eeprom = mlx5_get_module_eeprom,
-       .hairpin_cap_get = mlx5_hairpin_cap_get,
-       .mtr_ops_get = mlx5_flow_meter_ops_get,
-};
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+               DRV_LOG(ERR, "Secondary process is not supported on Windows.");
+               return -ENOTSUP;
+       }
+       ret = mlx5_init_once();
+       if (ret) {
+               DRV_LOG(ERR, "unable to init PMD global data: %s",
+                       strerror(rte_errno));
+               return -rte_errno;
+       }
+       spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, mkvlist);
+       if (!spawn.eth_dev)
+               return -rte_errno;
+       restore = spawn.eth_dev->data->dev_flags;
+       rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
+       /* Restore non-PCI flags cleared by the above call. */
+       spawn.eth_dev->data->dev_flags |= restore;
+       rte_eth_dev_probing_finish(spawn.eth_dev);
+       return 0;
+}
 
-/* Available operations from secondary process. */
-const struct eth_dev_ops mlx5_os_dev_sec_ops = {0};
-
-/* Available operations in flow isolated mode. */
-const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
-       .dev_configure = mlx5_dev_configure,
-       .dev_start = mlx5_dev_start,
-       .dev_stop = mlx5_dev_stop,
-       .dev_close = mlx5_dev_close,
-       .mtu_set = mlx5_dev_set_mtu,
-       .link_update = mlx5_link_update,
-       .stats_get = mlx5_stats_get,
-       .stats_reset = mlx5_stats_reset,
-       .dev_infos_get = mlx5_dev_infos_get,
-       .dev_set_link_down = mlx5_set_link_down,
-       .dev_set_link_up = mlx5_set_link_up,
-       .promiscuous_enable = mlx5_promiscuous_enable,
-       .promiscuous_disable = mlx5_promiscuous_disable,
-       .allmulticast_enable = mlx5_allmulticast_enable,
-       .allmulticast_disable = mlx5_allmulticast_disable,
-       .xstats_get = mlx5_xstats_get,
-       .xstats_reset = mlx5_xstats_reset,
-       .xstats_get_names = mlx5_xstats_get_names,
-       .fw_version_get = mlx5_fw_version_get,
-       .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
-       .vlan_filter_set = mlx5_vlan_filter_set,
-       .rx_queue_setup = mlx5_rx_queue_setup,
-       .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
-       .tx_queue_setup = mlx5_tx_queue_setup,
-       .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
-       .rx_queue_release = mlx5_rx_queue_release,
-       .tx_queue_release = mlx5_tx_queue_release,
-       .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
-       .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
-       .mac_addr_remove = mlx5_mac_addr_remove,
-       .mac_addr_add = mlx5_mac_addr_add,
-       .mac_addr_set = mlx5_mac_addr_set,
-       .set_mc_addr_list = mlx5_set_mc_addr_list,
-       .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
-       .vlan_offload_set = mlx5_vlan_offload_set,
-       .filter_ctrl = mlx5_dev_filter_ctrl,
-       .rxq_info_get = mlx5_rxq_info_get,
-       .txq_info_get = mlx5_txq_info_get,
-       .rx_burst_mode_get = mlx5_rx_burst_mode_get,
-       .tx_burst_mode_get = mlx5_tx_burst_mode_get,
-       .rx_queue_intr_enable = mlx5_rx_intr_enable,
-       .rx_queue_intr_disable = mlx5_rx_intr_disable,
-       .is_removed = mlx5_is_removed,
-       .get_module_info = mlx5_get_module_info,
-       .get_module_eeprom = mlx5_get_module_eeprom,
-       .hairpin_cap_get = mlx5_hairpin_cap_get,
-       .mtr_ops_get = mlx5_flow_meter_ops_get,
-};
+/**
+ * Cleanup resources when the last device is closed.
+ */
+void
+mlx5_os_net_cleanup(void)
+{
+}
+
+const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};