net/mlx5: enable more shared code on Windows
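
Replace direct calls through the rdma-core glue table (mlx5_glue->devx_*)
with the OS-neutral mlx5_os_* wrappers so that mlx5_devx.c can be shared
with the Windows build, where rdma-core is not available. Extend the
HAVE_IBV_FLOW_DV_SUPPORT and HAVE_MLX5DV_DEVX_UAR_OFFSET guards with
!defined(HAVE_INFINIBAND_VERBS_H) so the guarded paths also compile on
Windows, and route the DevX TIR destination action through
mlx5_flow_os_create_flow_action_dest_devx_tir(). While here, deregister
umem before freeing the buffers it covers, release the SQ resources
before the CQ resources on Tx queue teardown, and factor the RQT
attribute setup out of mlx5_devx_ind_table_new() into a helper shared
with a new ind_table_modify callback.

The OS-wrapper pattern, as a minimal sketch (the "example_" names below
are hypothetical stand-ins rather than the actual mlx5 definitions; on
Linux such wrappers would simply forward to the glue table, on Windows
to the native DevX library):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the rdma-core glue dispatch table (mlx5_glue). */
    struct example_glue_ops {
            void *(*devx_umem_reg)(void *ctx, void *addr, size_t size,
                                   uint32_t access);
            int (*devx_umem_dereg)(void *umem);
    };

    extern const struct example_glue_ops *example_glue;

    /*
     * Shared code calls the OS-neutral name; each OS supplies its own
     * definition, keeping files such as mlx5_devx.c OS-agnostic.
     */
    static inline void *
    example_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access)
    {
            return example_glue->devx_umem_reg(ctx, addr, size, access);
    }

    static inline int
    example_os_umem_dereg(void *umem)
    {
            return example_glue->devx_umem_dereg(umem);
    }
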
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 34044fc..da3bb78 100644
@@ -23,7 +23,7 @@
 #include "mlx5_utils.h"
 #include "mlx5_devx.h"
 #include "mlx5_flow.h"
-
+#include "mlx5_flow_os.h"
 
 /**
  * Modify RQ vlan stripping offload
@@ -154,14 +154,14 @@ mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;
 
+       if (rxq_ctrl->wq_umem) {
+               mlx5_os_umem_dereg(rxq_ctrl->wq_umem);
+               rxq_ctrl->wq_umem = NULL;
+       }
        if (rxq_ctrl->rxq.wqes) {
                mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
                rxq_ctrl->rxq.wqes = NULL;
        }
-       if (rxq_ctrl->wq_umem) {
-               mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
-               rxq_ctrl->wq_umem = NULL;
-       }
        if (dbr_page) {
                claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id(dbr_page->umem),
@@ -181,14 +181,14 @@ mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;
 
+       if (rxq_ctrl->cq_umem) {
+               mlx5_os_umem_dereg(rxq_ctrl->cq_umem);
+               rxq_ctrl->cq_umem = NULL;
+       }
        if (rxq_ctrl->rxq.cqes) {
                rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
                rxq_ctrl->rxq.cqes = NULL;
        }
-       if (rxq_ctrl->cq_umem) {
-               mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
-               rxq_ctrl->cq_umem = NULL;
-       }
        if (dbr_page) {
                claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id(dbr_page->umem),
@@ -216,7 +216,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
                if (rxq_obj->devx_channel)
-                       mlx5_glue->devx_destroy_event_channel
+                       mlx5_os_devx_destroy_event_channel
                                                        (rxq_obj->devx_channel);
                mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
                mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
@@ -375,7 +375,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
        if (!buf)
                return NULL;
        rxq_data->wqes = buf;
-       rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+       rxq_ctrl->wq_umem = mlx5_os_umem_reg(priv->sh->ctx,
                                                     buf, wq_size, 0);
        if (!rxq_ctrl->wq_umem)
                goto error;
@@ -497,7 +497,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
-       rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
+       rxq_ctrl->cq_umem = mlx5_os_umem_reg(priv->sh->ctx, buf,
                                                     cq_size,
                                                     IBV_ACCESS_LOCAL_WRITE);
        if (!rxq_ctrl->cq_umem) {
@@ -533,7 +533,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rxq_data->cqe_n = log_cqe_n;
        rxq_data->cqn = cq_obj->id;
        if (rxq_ctrl->obj->devx_channel) {
-               ret = mlx5_glue->devx_subscribe_devx_event
+               ret = mlx5_os_devx_subscribe_devx_event
                                                (rxq_ctrl->obj->devx_channel,
                                                 cq_obj->obj,
                                                 sizeof(event_nums),
@@ -644,7 +644,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
                int devx_ev_flag =
                          MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
-               tmpl->devx_channel = mlx5_glue->devx_create_event_channel
+               tmpl->devx_channel = mlx5_os_devx_create_event_channel
                                                                (priv->sh->ctx,
                                                                 devx_ev_flag);
                if (!tmpl->devx_channel) {
@@ -686,7 +686,7 @@ error:
        if (tmpl->devx_cq)
                claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
        if (tmpl->devx_channel)
-               mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
+               mlx5_os_devx_destroy_event_channel(tmpl->devx_channel);
        mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
        mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
        rte_errno = ret; /* Restore rte_errno. */
@@ -694,7 +694,7 @@ error:
 }
 
 /**
- * Create RQT using DevX API as a filed of indirection table.
+ * Prepare RQT attribute structure for DevX RQT API.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -704,30 +704,31 @@ error:
  *   DevX indirection table object.
  *
  * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
+ *   The RQT attr object on success, NULL otherwise and rte_errno is set.
  */
-static int
-mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
-                       struct mlx5_ind_table_obj *ind_tbl)
+static struct mlx5_devx_rqt_attr *
+mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
+                                    const unsigned int log_n,
+                                    const uint16_t *queues,
+                                    const uint32_t queues_n)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;
        const unsigned int rqt_n = 1 << log_n;
        unsigned int i, j;
 
-       MLX5_ASSERT(ind_tbl);
        rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
                              rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
        if (!rqt_attr) {
                DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
                        dev->data->port_id);
                rte_errno = ENOMEM;
-               return -rte_errno;
+               return NULL;
        }
        rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
        rqt_attr->rqt_actual_size = rqt_n;
-       for (i = 0; i != ind_tbl->queues_n; ++i) {
-               struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+       for (i = 0; i != queues_n; ++i) {
+               struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                                container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
@@ -736,6 +737,35 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
        MLX5_ASSERT(i > 0);
        for (j = 0; i != rqt_n; ++j, ++i)
                rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
+       return rqt_attr;
+}
+
+/**
+ * Create RQT using DevX API as a field of the indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param ind_tbl
+ *   DevX indirection table object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+                       struct mlx5_ind_table_obj *ind_tbl)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+       MLX5_ASSERT(ind_tbl);
+       rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+                                                       ind_tbl->queues,
+                                                       ind_tbl->queues_n);
+       if (!rqt_attr)
+               return -rte_errno;
        ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
        mlx5_free(rqt_attr);
        if (!ind_tbl->rqt) {
@@ -747,6 +777,45 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
        return 0;
 }
 
+/**
+ * Modify RQT using DevX API as a field of the indirection table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param log_n
+ *   Log of number of queues in the array.
+ * @param queues
+ *   Queues to be placed in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ * @param ind_tbl
+ *   DevX indirection table object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
+                          const uint16_t *queues, const uint32_t queues_n,
+                          struct mlx5_ind_table_obj *ind_tbl)
+{
+       int ret = 0;
+       struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+
+       MLX5_ASSERT(ind_tbl);
+       rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
+                                                       queues,
+                                                       queues_n);
+       if (!rqt_attr)
+               return -rte_errno;
+       ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
+       mlx5_free(rqt_attr);
+       if (ret)
+               DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
+                       dev->data->port_id);
+       return ret;
+}
+
 /**
  * Destroy the DevX RQT object.
  *
@@ -876,10 +941,9 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                rte_errno = errno;
                goto error;
        }
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
-                                                              (hrxq->tir->obj);
-       if (!hrxq->action) {
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+       if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
+                                                         &hrxq->action)) {
                rte_errno = errno;
                goto error;
        }
@@ -1047,7 +1111,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
        return 0;
 }
 
-#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
 /**
  * Release DevX SQ resources.
  *
@@ -1062,7 +1126,7 @@ mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
                txq_obj->sq_devx = NULL;
        }
        if (txq_obj->sq_umem) {
-               claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
+               claim_zero(mlx5_os_umem_dereg(txq_obj->sq_umem));
                txq_obj->sq_umem = NULL;
        }
        if (txq_obj->sq_buf) {
@@ -1090,7 +1154,7 @@ mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
        if (txq_obj->cq_devx)
                claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
        if (txq_obj->cq_umem)
-               claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
+               claim_zero(mlx5_os_umem_dereg(txq_obj->cq_umem));
        if (txq_obj->cq_buf)
                mlx5_free(txq_obj->cq_buf);
        if (txq_obj->cq_dbrec_page)
@@ -1109,8 +1173,8 @@ mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
 static void
 mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
 {
-       mlx5_txq_release_devx_cq_resources(txq_obj);
        mlx5_txq_release_devx_sq_resources(txq_obj);
+       mlx5_txq_release_devx_cq_resources(txq_obj);
 }
 
 /**
@@ -1178,7 +1242,7 @@ mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
                return 0;
        }
        /* Register allocated buffer in user space with DevX. */
-       txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+       txq_obj->cq_umem = mlx5_os_umem_reg(priv->sh->ctx,
                                                (void *)txq_obj->cq_buf,
                                                cqe_n * sizeof(struct mlx5_cqe),
                                                IBV_ACCESS_LOCAL_WRITE);
@@ -1277,7 +1341,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        /* Register allocated buffer in user space with DevX. */
-       txq_obj->sq_umem = mlx5_glue->devx_umem_reg
+       txq_obj->sq_umem = mlx5_os_umem_reg
                                        (priv->sh->ctx,
                                         (void *)txq_obj->sq_buf,
                                         wqe_n * sizeof(struct mlx5_wqe),
@@ -1357,7 +1421,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 
        if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
                return mlx5_txq_obj_hairpin_new(dev, idx);
-#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
+#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
        DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
                     dev->data->port_id, idx);
        rte_errno = ENOMEM;
@@ -1458,7 +1522,7 @@ mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
        if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                if (txq_obj->tis)
                        claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
-#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
        } else {
                mlx5_txq_release_devx_resources(txq_obj);
 #endif
@@ -1472,6 +1536,7 @@ struct mlx5_obj_ops devx_obj_ops = {
        .rxq_obj_modify = mlx5_devx_modify_rq,
        .rxq_obj_release = mlx5_rxq_devx_obj_release,
        .ind_table_new = mlx5_devx_ind_table_new,
+       .ind_table_modify = mlx5_devx_ind_table_modify,
        .ind_table_destroy = mlx5_devx_ind_table_destroy,
        .hrxq_new = mlx5_devx_hrxq_new,
        .hrxq_destroy = mlx5_devx_tir_destroy,