net/mlx5: spawn ethdev ports on Windows
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 31011c3..60301d3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -21,6 +21,7 @@
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
@@ -37,6 +38,7 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_mr.h"
 #include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
 #include "rte_pmd_mlx5.h"
 
 /* Device parameter to enable RX completion queue compression. */
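Note: mlx5_flow_os.h is where the OS-specific flow helpers used below (e.g. mlx5_flow_os_destroy_flow_action()) are expected to be declared, so the shared mlx5.c no longer calls mlx5_glue directly and can be built for Windows. A minimal sketch of what the Linux-side wrapper may look like, assuming it simply forwards to the existing glue call (the Windows build would supply a DevX-based counterpart):

	/* Assumed Linux wrapper (not quoted from the tree): forward to the
	 * Verbs glue layer so common code stays free of mlx5_glue references. */
	static inline int
	mlx5_flow_os_destroy_flow_action(void *action)
	{
		return mlx5_glue->destroy_flow_action(action);
	}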
@@ -265,6 +267,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
        },
        [MLX5_IPOOL_TUNNEL_ID] = {
                .size = sizeof(struct mlx5_flow_tunnel),
+               .trunk_size = MLX5_MAX_TUNNELS,
                .need_lock = 1,
                .release_mem_en = 1,
                .type = "mlx5_tunnel_offload",
@@ -413,13 +416,13 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
                        for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
                                if (pool->actions[j].dr_action)
                                        claim_zero
-                                               (mlx5_glue->destroy_flow_action
-                                                 (pool->actions[j].dr_action));
+                                           (mlx5_flow_os_destroy_flow_action
+                                             (pool->actions[j].dr_action));
                        mlx5_free(pool);
                }
                mlx5_free(sh->aso_age_mng->pools);
        }
-       memset(&sh->aso_age_mng, 0, sizeof(sh->aso_age_mng));
+       mlx5_free(sh->aso_age_mng);
 }
 
 /**
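Note on the hunk above: sh->aso_age_mng is heap-allocated when ASO flow aging is set up, so the close path must release that allocation; the old memset() only zeroed the pointer field and leaked the structure. A hedged sketch of the intended pairing (the allocation-side call shown here is assumed, not part of this diff):

	/* Assumed allocation site in the ASO age init path: */
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	/* Matching release, now done in mlx5_flow_aso_age_mng_close(): */
	mlx5_free(sh->aso_age_mng);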
@@ -480,7 +483,7 @@ mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
 
        LIST_REMOVE(mng, next);
        claim_zero(mlx5_devx_cmd_destroy(mng->dm));
-       claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
+       claim_zero(mlx5_os_umem_dereg(mng->umem));
        mlx5_free(mem);
 }
 
@@ -521,7 +524,7 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 
                                if (cnt->action)
                                        claim_zero
-                                        (mlx5_glue->destroy_flow_action
+                                        (mlx5_flow_os_destroy_flow_action
                                          (cnt->action));
                                if (fallback && MLX5_POOL_GET_CNT
                                    (pool, j)->dcs_when_free)
@@ -915,6 +918,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                goto error;
        }
        sh->refcnt = 1;
+       sh->bond_dev = UINT16_MAX;
        sh->max_port = spawn->max_port;
        strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
                sizeof(sh->ibdev_name) - 1);
@@ -929,7 +933,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
-       sh->pd = mlx5_glue->alloc_pd(sh->ctx);
+       sh->pd = mlx5_os_alloc_pd(sh->ctx);
        if (sh->pd == NULL) {
                DRV_LOG(ERR, "PD allocation failure");
                err = ENOMEM;
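Note: mlx5_os_alloc_pd()/mlx5_os_dealloc_pd() replace the direct glue calls so each OS backend can manage the protection domain its own way (Verbs on Linux, DevX on Windows). A hedged sketch of the Linux-side wrappers, assuming they simply forward to the glue layer:

	/* Assumed Linux implementations (likely in the linux/ OS layer): */
	void *
	mlx5_os_alloc_pd(void *ctx)
	{
		return mlx5_glue->alloc_pd(ctx);
	}

	int
	mlx5_os_dealloc_pd(void *pd)
	{
		return mlx5_glue->dealloc_pd(pd);
	}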
@@ -1029,7 +1033,7 @@ error:
        if (sh->tx_uar)
                mlx5_glue->devx_free_uar(sh->tx_uar);
        if (sh->pd)
-               claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+               claim_zero(mlx5_os_dealloc_pd(sh->pd));
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
        mlx5_free(sh);
@@ -1097,7 +1101,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                sh->tx_uar = NULL;
        }
        if (sh->pd)
-               claim_zero(mlx5_glue->dealloc_pd(sh->pd));
+               claim_zero(mlx5_os_dealloc_pd(sh->pd));
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
@@ -1151,7 +1155,8 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
        MLX5_ASSERT(sh);
        snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
        sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
-                                         0, 0, flow_dv_tbl_create_cb, NULL,
+                                         0, 0, flow_dv_tbl_create_cb,
+                                         flow_dv_tbl_match_cb,
                                          flow_dv_tbl_remove_cb);
        if (!sh->flow_tbls) {
                DRV_LOG(ERR, "flow tables with hash creation failed.");
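Note: mlx5_hlist_create() here is passed an explicit match callback alongside the create/remove callbacks, so hash-bucket collisions are resolved by comparing keys rather than trusting the hash alone. A purely illustrative sketch of what such a callback does (hypothetical names, not the mlx5_hlist typedefs):

	/* Hypothetical entry type keyed by a 64-bit table identifier. */
	struct tbl_entry {
		uint64_t table_key;
		/* ... table payload ... */
	};

	/* Hypothetical match callback: return 0 when the stored entry
	 * corresponds to the lookup key, non-zero otherwise. */
	static int
	tbl_match(struct tbl_entry *entry, uint64_t key)
	{
		return entry->table_key == key ? 0 : -1;
	}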
@@ -1327,7 +1332,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        mlx5_flex_parser_ecpri_release(dev);
        if (priv->rxqs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
-               usleep(1000);
+               rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
@@ -1335,7 +1340,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
-               usleep(1000);
+               rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->txqs_n); ++i)
                        mlx5_txq_release(dev, i);
                priv->txqs_n = 0;
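Note: rte_delay_us_sleep() (declared in rte_cycles.h, hence the new include at the top of this diff) is provided by the EAL on both Linux and Windows, whereas the POSIX usleep() it replaces is not available on Windows. A minimal, self-contained usage sketch:

	#include <rte_cycles.h>

	/* Pause for ~1 ms, e.g. to let in-flight rx/tx bursts drain; this
	 * sleeps (yields the CPU) rather than busy-waiting like rte_delay_us(). */
	static void
	drain_pause(void)
	{
		rte_delay_us_sleep(1000);
	}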
@@ -2034,7 +2039,7 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
-                               PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF)
+                               PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,