diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index de9b204..da3bb78 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -23,7 +23,7 @@
 #include "mlx5_utils.h"
 #include "mlx5_devx.h"
 #include "mlx5_flow.h"
-
+#include "mlx5_flow_os.h"
 
 /**
  * Modify RQ vlan stripping offload
@@ -155,7 +155,7 @@ mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;
 
        if (rxq_ctrl->wq_umem) {
-               mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
+               mlx5_os_umem_dereg(rxq_ctrl->wq_umem);
                rxq_ctrl->wq_umem = NULL;
        }
        if (rxq_ctrl->rxq.wqes) {
@@ -182,7 +182,7 @@ mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;
 
        if (rxq_ctrl->cq_umem) {
-               mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
+               mlx5_os_umem_dereg(rxq_ctrl->cq_umem);
                rxq_ctrl->cq_umem = NULL;
        }
        if (rxq_ctrl->rxq.cqes) {
@@ -216,7 +216,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
                if (rxq_obj->devx_channel)
-                       mlx5_glue->devx_destroy_event_channel
+                       mlx5_os_devx_destroy_event_channel
                                                        (rxq_obj->devx_channel);
                mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
                mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
@@ -375,7 +375,7 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
        if (!buf)
                return NULL;
        rxq_data->wqes = buf;
-       rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+       rxq_ctrl->wq_umem = mlx5_os_umem_reg(priv->sh->ctx,
                                                     buf, wq_size, 0);
        if (!rxq_ctrl->wq_umem)
                goto error;
@@ -497,7 +497,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
-       rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
+       rxq_ctrl->cq_umem = mlx5_os_umem_reg(priv->sh->ctx, buf,
                                                     cq_size,
                                                     IBV_ACCESS_LOCAL_WRITE);
        if (!rxq_ctrl->cq_umem) {
@@ -533,7 +533,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rxq_data->cqe_n = log_cqe_n;
        rxq_data->cqn = cq_obj->id;
        if (rxq_ctrl->obj->devx_channel) {
-               ret = mlx5_glue->devx_subscribe_devx_event
+               ret = mlx5_os_devx_subscribe_devx_event
                                                (rxq_ctrl->obj->devx_channel,
                                                 cq_obj->obj,
                                                 sizeof(event_nums),
@@ -644,7 +644,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
                int devx_ev_flag =
                          MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
-               tmpl->devx_channel = mlx5_glue->devx_create_event_channel
+               tmpl->devx_channel = mlx5_os_devx_create_event_channel
                                                                (priv->sh->ctx,
                                                                 devx_ev_flag);
                if (!tmpl->devx_channel) {
@@ -686,7 +686,7 @@ error:
        if (tmpl->devx_cq)
                claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
        if (tmpl->devx_channel)
-               mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
+               mlx5_os_devx_destroy_event_channel(tmpl->devx_channel);
        mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
        mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
        rte_errno = ret; /* Restore rte_errno. */
@@ -941,10 +941,9 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                rte_errno = errno;
                goto error;
        }
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
-                                                              (hrxq->tir->obj);
-       if (!hrxq->action) {
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+       if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
+                                                         &hrxq->action)) {
                rte_errno = errno;
                goto error;
        }
@@ -1112,7 +1111,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
        return 0;
 }
 
-#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
 /**
  * Release DevX SQ resources.
  *
@@ -1127,7 +1126,7 @@ mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
                txq_obj->sq_devx = NULL;
        }
        if (txq_obj->sq_umem) {
-               claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
+               claim_zero(mlx5_os_umem_dereg(txq_obj->sq_umem));
                txq_obj->sq_umem = NULL;
        }
        if (txq_obj->sq_buf) {
@@ -1155,7 +1154,7 @@ mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
        if (txq_obj->cq_devx)
                claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
        if (txq_obj->cq_umem)
-               claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
+               claim_zero(mlx5_os_umem_dereg(txq_obj->cq_umem));
        if (txq_obj->cq_buf)
                mlx5_free(txq_obj->cq_buf);
        if (txq_obj->cq_dbrec_page)
@@ -1243,7 +1242,7 @@ mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
                return 0;
        }
        /* Register allocated buffer in user space with DevX. */
-       txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+       txq_obj->cq_umem = mlx5_os_umem_reg(priv->sh->ctx,
                                                (void *)txq_obj->cq_buf,
                                                cqe_n * sizeof(struct mlx5_cqe),
                                                IBV_ACCESS_LOCAL_WRITE);
@@ -1342,7 +1341,7 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        /* Register allocated buffer in user space with DevX. */
-       txq_obj->sq_umem = mlx5_glue->devx_umem_reg
+       txq_obj->sq_umem = mlx5_os_umem_reg
                                        (priv->sh->ctx,
                                         (void *)txq_obj->sq_buf,
                                         wqe_n * sizeof(struct mlx5_wqe),
@@ -1422,7 +1421,7 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 
        if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
                return mlx5_txq_obj_hairpin_new(dev, idx);
-#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
+#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
        DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
                     dev->data->port_id, idx);
        rte_errno = ENOMEM;
@@ -1523,7 +1522,7 @@ mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
        if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                if (txq_obj->tis)
                        claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
-#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
+#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
        } else {
                mlx5_txq_release_devx_resources(txq_obj);
 #endif