net/mlx5: initialize flow meter ASO SQ
author Li Zhang <lizh@nvidia.com>
Tue, 20 Apr 2021 10:55:17 +0000 (13:55 +0300)
committer Raslan Darawsheh <rasland@nvidia.com>
Wed, 21 Apr 2021 06:28:04 +0000 (08:28 +0200)
Initialize the flow meter ASO SQ WQEs with
all the constant data that should not be updated
per enqueue operation.

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/meson.build
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_age.c [deleted file]
drivers/net/mlx5/mlx5_flow_aso.c [new file with mode: 0644]
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_meter.c
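
The constant initialization the commit message describes splits the WQE setup
in two: fields identical for every enqueue (DS count, completion flags,
operand masks, the per-meter valid bits and the initial green color) are
written once when the SQ is created, so the datapath only fills what changes
per operation. A minimal sketch of that split; the two helper names are
illustrative only, not functions from this patch:

    /* Once, at SQ creation time, for every WQE slot in the ring. */
    static void
    aso_mtr_wqe_fill_const(volatile struct mlx5_aso_wqe *wqe, uint32_t sqn)
    {
            wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sqn << 8) |
                                                      (sizeof(*wqe) >> 4));
            wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
                                               MLX5_COMP_MODE_OFFSET);
    }

    /* Per enqueue, on the datapath: only object ID and index change. */
    static void
    aso_mtr_wqe_fill_dyn(volatile struct mlx5_aso_wqe *wqe,
                         uint32_t aso_obj_id, uint16_t pi)
    {
            wqe->general_cseg.misc = rte_cpu_to_be_32(aso_obj_id);
            wqe->general_cseg.opcode = rte_cpu_to_be_32
                    (MLX5_OPCODE_ACCESS_ASO |
                     (ASO_OPC_MOD_POLICER << WQE_CSEG_OPC_MOD_OFFSET) |
                     (pi << WQE_CSEG_WQE_INDEX_OFFSET));
    }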

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index ad43141..336cdbe 100644
@@ -1290,6 +1290,22 @@ err_secondary:
                                        priv->mtr_color_reg);
                        }
                }
+               if (config->hca_attr.qos.sup &&
+                       config->hca_attr.qos.flow_meter_aso_sup) {
+                       uint32_t log_obj_size =
+                               rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
+                       if (log_obj_size >=
+                       config->hca_attr.qos.log_meter_aso_granularity &&
+                       log_obj_size <=
+                       config->hca_attr.qos.log_meter_aso_max_alloc) {
+                               sh->meter_aso_en = 1;
+                               err = mlx5_aso_flow_mtrs_mng_init(priv);
+                               if (err) {
+                                       err = -err;
+                                       goto error;
+                               }
+                       }
+               }
 #endif
 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
                if (config->hca_attr.flow_hit_aso &&
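
A worked instance of the gate above: the PMD asks for ASO objects covering
half a pool of meters (one ASO object serves two meters, hence the right
shift), and enables meter ASO only when that size fits the device limits.
Assuming MLX5_ASO_MTRS_PER_POOL is 128:

    uint32_t log_obj_size = rte_log2_u32(128 >> 1);   /* = 6 */
    /*
     * Meter ASO is enabled only if the HCA reports
     * log_meter_aso_granularity <= 6 <= log_meter_aso_max_alloc.
     */
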
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 5aa4039..6f28be2 100644
@@ -15,7 +15,7 @@ sources = files(
         'mlx5_flow.c',
         'mlx5_flow_meter.c',
         'mlx5_flow_dv.c',
-        'mlx5_flow_age.c',
+        'mlx5_flow_aso.c',
         'mlx5_mac.c',
         'mlx5_mr.c',
         'mlx5_rss.c',
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1b5b5cb..00055e3 100644
@@ -403,7 +403,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
-       err = mlx5_aso_queue_init(sh);
+       err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT);
        if (err) {
                mlx5_free(sh->aso_age_mng);
                return -1;
@@ -425,8 +425,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
        int i, j;
 
-       mlx5_aso_queue_stop(sh);
-       mlx5_aso_queue_uninit(sh);
+       mlx5_aso_flow_hit_queue_poll_stop(sh);
+       mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
        if (sh->aso_age_mng->pools) {
                struct mlx5_aso_age_pool *pool;
 
@@ -564,6 +564,66 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
        memset(&sh->cmng, 0, sizeof(sh->cmng));
 }
 
+/**
+ * Initialize the ASO flow meter management structure.
+ *
+ * @param[in] priv
+ *   Pointer to the port private data structure.
+ */
+int
+mlx5_aso_flow_mtrs_mng_init(struct mlx5_priv *priv)
+{
+       if (!priv->mtr_idx_tbl) {
+               priv->mtr_idx_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+               if (!priv->mtr_idx_tbl) {
+                       DRV_LOG(ERR, "Failed to create meter lookup table.");
+                       rte_errno = ENOMEM;
+                       return -ENOMEM;
+               }
+       }
+       if (!priv->sh->mtrmng) {
+               priv->sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
+                       sizeof(*priv->sh->mtrmng),
+                       RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+               if (!priv->sh->mtrmng) {
+                       DRV_LOG(ERR, "mlx5_aso_mtr_pools_mng allocation failed.");
+                       rte_errno = ENOMEM;
+                       return -ENOMEM;
+               }
+               rte_spinlock_init(&priv->sh->mtrmng->mtrsl);
+               LIST_INIT(&priv->sh->mtrmng->meters);
+       }
+       return 0;
+}
+
+/**
+ * Close and release all the resources of
+ * the ASO flow meter management structure.
+ *
+ * @param[in] sh
+ *   Pointer to the mlx5_dev_ctx_shared object to release resources from.
+ */
+static void
+mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
+{
+       struct mlx5_aso_mtr_pool *mtr_pool;
+       struct mlx5_aso_mtr_pools_mng *mtrmng = sh->mtrmng;
+       uint32_t idx;
+
+       mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
+       idx = mtrmng->n_valid;
+       while (idx--) {
+               mtr_pool = mtrmng->pools[idx];
+               claim_zero(mlx5_devx_cmd_destroy
+                                               (mtr_pool->devx_obj));
+               mtrmng->n_valid--;
+               mlx5_free(mtr_pool);
+       }
+       mlx5_free(sh->mtrmng->pools);
+       mlx5_free(sh->mtrmng);
+       sh->mtrmng = NULL;
+}
+
 /* Send FLOW_AGED event if needed. */
 void
 mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
@@ -1113,6 +1173,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_flow_aso_age_mng_close(sh);
                sh->aso_age_mng = NULL;
        }
+       if (sh->mtrmng)
+               mlx5_aso_flow_mtrs_mng_close(sh);
        mlx5_flow_ipool_destroy(sh);
        mlx5_os_dev_shared_handler_uninstall(sh);
        if (sh->cnt_id_tbl) {
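
Together with the probe-time hook in mlx5_os.c above, the lifecycle of the
new manager is: create on probe when the HCA supports it, release on
shared-context teardown. A condensed sketch (error handling trimmed):

    /* Probe: only when the HCA reports flow_meter_aso_sup. */
    sh->meter_aso_en = 1;
    if (mlx5_aso_flow_mtrs_mng_init(priv))  /* L3T table + mtrmng. */
            goto error;                     /* rte_errno is set.   */

    /* Teardown (mlx5_free_shared_dev_ctx): SQ, pools, then manager. */
    if (sh->mtrmng)
            mlx5_aso_flow_mtrs_mng_close(sh);
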
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 2e93dda..4ad0e14 100644
@@ -491,8 +491,13 @@ struct mlx5_aso_devx_mr {
 };
 
 struct mlx5_aso_sq_elem {
-       struct mlx5_aso_age_pool *pool;
-       uint16_t burst_size;
+       union {
+               struct {
+                       struct mlx5_aso_age_pool *pool;
+                       uint16_t burst_size;
+               };
+               struct mlx5_aso_mtr *mtr;
+       };
 };
 
 struct mlx5_aso_sq {
@@ -764,7 +769,6 @@ struct mlx5_aso_mtr_pools_mng {
        volatile uint16_t n_valid; /* Number of valid pools. */
        uint16_t n; /* Number of pools. */
        rte_spinlock_t mtrsl; /* The ASO flow meter free list lock. */
-       struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
        struct aso_meter_list meters; /* Free ASO flow meter list. */
        struct mlx5_aso_sq sq; /* SQ used by ASO flow meter. */
        struct mlx5_aso_mtr_pool **pools; /* ASO flow meter pool array. */
@@ -1195,6 +1199,7 @@ struct mlx5_priv {
        uint8_t mtr_color_reg; /* Meter color match REG_C. */
        struct mlx5_mtr_profiles flow_meter_profiles; /* MTR profile list. */
        struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
+       struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
        uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
        uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
        struct mlx5_mp_id mp_id; /* ID of a multi-process process */
@@ -1258,6 +1263,7 @@ int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
 bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
 int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
 int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_flow_mtrs_mng_init(struct mlx5_priv *priv);
 
 /* mlx5_ethdev.c */
 
@@ -1528,9 +1534,11 @@ eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
 
 /* mlx5_flow_aso.c */
 
-int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh);
-void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+               enum mlx5_access_aso_opc_mod aso_opc_mod);
+int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh);
+int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh);
+void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+               enum mlx5_access_aso_opc_mod aso_opc_mod);
 
 #endif /* RTE_PMD_MLX5_H_ */
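
The SQ API above is now shared by both ASO users and dispatched on the
opcode modifier. A usage sketch of the two paths as wired by this patch
(the meter completion handling itself lands later in the series):

    /* Flow-hit (age): MR + SQ, then the periodic polling alarm. */
    if (mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT))
            return -1;
    if (mlx5_aso_flow_hit_queue_poll_start(sh))
            return -1;

    /* Flow meter: SQ only, created with the first meter pool. */
    if (mlx5_aso_queue_init(sh, ASO_OPC_MOD_POLICER))
            return -1;

    /* Teardown mirrors init. */
    mlx5_aso_flow_hit_queue_poll_stop(sh);
    mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
    mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
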
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index b6ec727..81cf1b5 100644
@@ -826,8 +826,8 @@ struct mlx5_flow {
 #define MLX5_FLOW_METER_DISABLE 0
 #define MLX5_FLOW_METER_ENABLE 1
 
-#define MLX5_ASO_CQE_RESPONSE_DELAY 10
-#define MLX5_MTR_POLL_CQE_TIMES    100000u
+#define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
+#define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
 
 #define MLX5_MAN_WIDTH 8
 /* Legacy Meter parameter structure. */
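
The renamed bounds describe a bounded busy-wait on a meter WQE completion:
sleep MLX5_ASO_WQE_CQE_RESPONSE_DELAY microseconds between CQ polls, for at
most MLX5_MTR_POLL_WQE_CQE_TIMES attempts, i.e. about one second in total.
A sketch of the loop they imply; the handler and state names here are
assumptions, since the consumer arrives later in the series:

    uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;

    do {
            mlx5_aso_mtr_completion_handle(sq);  /* assumed handler */
            if (mtr->state == ASO_METER_READY)   /* assumed state   */
                    return 0;
            /* Wait before polling the CQ again. */
            rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
    } while (--poll_cqe_times);
    return -1;                                   /* Timed out. */
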
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c
deleted file mode 100644
index 00cb20d..0000000
+++ /dev/null
@@ -1,591 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 Mellanox Technologies, Ltd
- */
-#include <mlx5_prm.h>
-#include <rte_malloc.h>
-#include <rte_cycles.h>
-#include <rte_eal_paging.h>
-
-#include <mlx5_malloc.h>
-#include <mlx5_common_os.h>
-#include <mlx5_common_devx.h>
-
-#include "mlx5.h"
-#include "mlx5_flow.h"
-
-
-/**
- * Destroy Completion Queue used for ASO access.
- *
- * @param[in] cq
- *   ASO CQ to destroy.
- */
-static void
-mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
-{
-       if (cq->cq_obj.cq)
-               mlx5_devx_cq_destroy(&cq->cq_obj);
-       memset(cq, 0, sizeof(*cq));
-}
-
-/**
- * Create Completion Queue used for ASO access.
- *
- * @param[in] ctx
- *   Context returned from mlx5 open_device() glue function.
- * @param[in/out] cq
- *   Pointer to CQ to create.
- * @param[in] log_desc_n
- *   Log of number of descriptors in queue.
- * @param[in] socket
- *   Socket to use for allocation.
- * @param[in] uar_page_id
- *   UAR page ID to use.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
-                  int socket, int uar_page_id)
-{
-       struct mlx5_devx_cq_attr attr = {
-               .uar_page_id = uar_page_id,
-       };
-
-       cq->log_desc_n = log_desc_n;
-       cq->cq_ci = 0;
-       return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
-}
-
-/**
- * Free MR resources.
- *
- * @param[in] mr
- *   MR to free.
- */
-static void
-mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
-{
-       claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
-       if (!mr->is_indirect && mr->umem)
-               claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
-       mlx5_free(mr->buf);
-       memset(mr, 0, sizeof(*mr));
-}
-
-/**
- * Register Memory Region.
- *
- * @param[in] ctx
- *   Context returned from mlx5 open_device() glue function.
- * @param[in] length
- *   Size of MR buffer.
- * @param[in/out] mr
- *   Pointer to MR to create.
- * @param[in] socket
- *   Socket to use for allocation.
- * @param[in] pdn
- *   Protection Domain number to use.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
-                    int socket, int pdn)
-{
-       struct mlx5_devx_mkey_attr mkey_attr;
-
-       mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
-                             socket);
-       if (!mr->buf) {
-               DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
-               return -1;
-       }
-       mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
-                                                IBV_ACCESS_LOCAL_WRITE);
-       if (!mr->umem) {
-               DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
-               goto error;
-       }
-       mkey_attr.addr = (uintptr_t)mr->buf;
-       mkey_attr.size = length;
-       mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
-       mkey_attr.pd = pdn;
-       mkey_attr.pg_access = 1;
-       mkey_attr.klm_array = NULL;
-       mkey_attr.klm_num = 0;
-       mkey_attr.relaxed_ordering_read = 0;
-       mkey_attr.relaxed_ordering_write = 0;
-       mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
-       if (!mr->mkey) {
-               DRV_LOG(ERR, "Failed to create direct Mkey.");
-               goto error;
-       }
-       mr->length = length;
-       mr->is_indirect = false;
-       return 0;
-error:
-       if (mr->umem)
-               claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
-       mlx5_free(mr->buf);
-       return -1;
-}
-
-/**
- * Destroy Send Queue used for ASO access.
- *
- * @param[in] sq
- *   ASO SQ to destroy.
- */
-static void
-mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
-{
-       mlx5_devx_sq_destroy(&sq->sq_obj);
-       mlx5_aso_cq_destroy(&sq->cq);
-       mlx5_aso_devx_dereg_mr(&sq->mr);
-       memset(sq, 0, sizeof(*sq));
-}
-
-/**
- * Initialize Send Queue used for ASO access.
- *
- * @param[in] sq
- *   ASO SQ to initialize.
- */
-static void
-mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
-{
-       volatile struct mlx5_aso_wqe *restrict wqe;
-       int i;
-       int size = 1 << sq->log_desc_n;
-       uint64_t addr;
-
-       /* All the next fields state should stay constant. */
-       for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
-               wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
-                                                         (sizeof(*wqe) >> 4));
-               wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
-               addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
-                                           MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
-               wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
-               wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
-               wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
-                       (0u |
-                        (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
-                        (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
-                        (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
-                        (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
-               wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
-       }
-}
-
-/**
- * Create Send Queue used for ASO access.
- *
- * @param[in] ctx
- *   Context returned from mlx5 open_device() glue function.
- * @param[in/out] sq
- *   Pointer to SQ to create.
- * @param[in] socket
- *   Socket to use for allocation.
- * @param[in] uar
- *   User Access Region object.
- * @param[in] pdn
- *   Protection Domain number to use.
- * @param[in] log_desc_n
- *   Log of number of descriptors in queue.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
-                  void *uar, uint32_t pdn,  uint16_t log_desc_n,
-                  uint32_t ts_format)
-{
-       struct mlx5_devx_create_sq_attr attr = {
-               .user_index = 0xFFFF,
-               .wq_attr = (struct mlx5_devx_wq_attr){
-                       .pd = pdn,
-                       .uar_page = mlx5_os_get_devx_uar_page_id(uar),
-               },
-               .ts_format = mlx5_ts_format_conv(ts_format),
-       };
-       struct mlx5_devx_modify_sq_attr modify_attr = {
-               .state = MLX5_SQC_STATE_RDY,
-       };
-       uint32_t sq_desc_n = 1 << log_desc_n;
-       uint16_t log_wqbb_n;
-       int ret;
-
-       if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
-                                sq_desc_n, &sq->mr, socket, pdn))
-               return -1;
-       if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
-                              mlx5_os_get_devx_uar_page_id(uar)))
-               goto error;
-       sq->log_desc_n = log_desc_n;
-       attr.cqn = sq->cq.cq_obj.cq->id;
-       /* for mlx5_aso_wqe that is twice the size of mlx5_wqe */
-       log_wqbb_n = log_desc_n + 1;
-       ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
-       if (ret) {
-               DRV_LOG(ERR, "Can't create SQ object.");
-               rte_errno = ENOMEM;
-               goto error;
-       }
-       ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
-       if (ret) {
-               DRV_LOG(ERR, "Can't change SQ state to ready.");
-               rte_errno = ENOMEM;
-               goto error;
-       }
-       sq->pi = 0;
-       sq->head = 0;
-       sq->tail = 0;
-       sq->sqn = sq->sq_obj.sq->id;
-       sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
-       mlx5_aso_init_sq(sq);
-       return 0;
-error:
-       mlx5_aso_destroy_sq(sq);
-       return -1;
-}
-
-/**
- * API to create and initialize Send Queue used for ASO access.
- *
- * @param[in] sh
- *   Pointer to shared device context.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
-{
-       return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
-                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                                 sh->sq_ts_format);
-}
-
-/**
- * API to destroy Send Queue used for ASO access.
- *
- * @param[in] sh
- *   Pointer to shared device context.
- */
-void
-mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
-{
-       mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
-}
-
-/**
- * Write a burst of WQEs to ASO SQ.
- *
- * @param[in] mng
- *   ASO management data, contains the SQ.
- * @param[in] n
- *   Index of the last valid pool.
- *
- * @return
- *   Number of WQEs in burst.
- */
-static uint16_t
-mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
-{
-       volatile struct mlx5_aso_wqe *wqe;
-       struct mlx5_aso_sq *sq = &mng->aso_sq;
-       struct mlx5_aso_age_pool *pool;
-       uint16_t size = 1 << sq->log_desc_n;
-       uint16_t mask = size - 1;
-       uint16_t max;
-       uint16_t start_head = sq->head;
-
-       max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
-       if (unlikely(!max))
-               return 0;
-       sq->elts[start_head & mask].burst_size = max;
-       do {
-               wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
-               rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
-               /* Fill next WQE. */
-               rte_spinlock_lock(&mng->resize_sl);
-               pool = mng->pools[sq->next];
-               rte_spinlock_unlock(&mng->resize_sl);
-               sq->elts[sq->head & mask].pool = pool;
-               wqe->general_cseg.misc =
-                               rte_cpu_to_be_32(((struct mlx5_devx_obj *)
-                                                (pool->flow_hit_aso_obj))->id);
-               wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
-                                                        MLX5_COMP_MODE_OFFSET);
-               wqe->general_cseg.opcode = rte_cpu_to_be_32
-                                               (MLX5_OPCODE_ACCESS_ASO |
-                                                (ASO_OPC_MOD_FLOW_HIT <<
-                                                 WQE_CSEG_OPC_MOD_OFFSET) |
-                                                (sq->pi <<
-                                                 WQE_CSEG_WQE_INDEX_OFFSET));
-               sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
-               sq->head++;
-               sq->next++;
-               max--;
-       } while (max);
-       wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
-                                                        MLX5_COMP_MODE_OFFSET);
-       rte_io_wmb();
-       sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
-       rte_wmb();
-       *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
-       rte_wmb();
-       return sq->elts[start_head & mask].burst_size;
-}
-
-/**
- * Debug utility function. Dump contents of error CQE and WQE.
- *
- * @param[in] cqe
- *   Error CQE to dump.
- * @param[in] wqe
- *   Error WQE to dump.
- */
-static void
-mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
-{
-       int i;
-
-       DRV_LOG(ERR, "Error cqe:");
-       for (i = 0; i < 16; i += 4)
-               DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
-                       cqe[i + 2], cqe[i + 3]);
-       DRV_LOG(ERR, "\nError wqe:");
-       for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
-               DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
-                       wqe[i + 2], wqe[i + 3]);
-}
-
-/**
- * Handle case of error CQE.
- *
- * @param[in] sq
- *   ASO SQ to use.
- */
-static void
-mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
-{
-       struct mlx5_aso_cq *cq = &sq->cq;
-       uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
-       volatile struct mlx5_err_cqe *cqe =
-                       (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];
-
-       cq->errors++;
-       idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n);
-       mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
-                              (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
-}
-
-/**
- * Update ASO objects upon completion.
- *
- * @param[in] sh
- *   Shared device context.
- * @param[in] n
- *   Number of completed ASO objects.
- */
-static void
-mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
-{
-       struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
-       struct mlx5_aso_sq *sq = &mng->aso_sq;
-       struct mlx5_age_info *age_info;
-       const uint16_t size = 1 << sq->log_desc_n;
-       const uint16_t mask = size - 1;
-       const uint64_t curr = MLX5_CURR_TIME_SEC;
-       uint16_t expected = AGE_CANDIDATE;
-       uint16_t i;
-
-       for (i = 0; i < n; ++i) {
-               uint16_t idx = (sq->tail + i) & mask;
-               struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
-               uint64_t diff = curr - pool->time_of_last_age_check;
-               uint64_t *addr = sq->mr.buf;
-               int j;
-
-               addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
-               pool->time_of_last_age_check = curr;
-               for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
-                       struct mlx5_aso_age_action *act = &pool->actions[j];
-                       struct mlx5_age_param *ap = &act->age_params;
-                       uint8_t byte;
-                       uint8_t offset;
-                       uint8_t *u8addr;
-                       uint8_t hit;
-
-                       if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
-                                           AGE_CANDIDATE)
-                               continue;
-                       byte = 63 - (j / 8);
-                       offset = j % 8;
-                       u8addr = (uint8_t *)addr;
-                       hit = (u8addr[byte] >> offset) & 0x1;
-                       if (hit) {
-                               __atomic_store_n(&ap->sec_since_last_hit, 0,
-                                                __ATOMIC_RELAXED);
-                       } else {
-                               struct mlx5_priv *priv;
-
-                               __atomic_fetch_add(&ap->sec_since_last_hit,
-                                                  diff, __ATOMIC_RELAXED);
-                               /* If timeout passed add to aged-out list. */
-                               if (ap->sec_since_last_hit <= ap->timeout)
-                                       continue;
-                               priv =
-                               rte_eth_devices[ap->port_id].data->dev_private;
-                               age_info = GET_PORT_AGE_INFO(priv);
-                               rte_spinlock_lock(&age_info->aged_sl);
-                               if (__atomic_compare_exchange_n(&ap->state,
-                                                               &expected,
-                                                               AGE_TMOUT,
-                                                               false,
-                                                              __ATOMIC_RELAXED,
-                                                           __ATOMIC_RELAXED)) {
-                                       LIST_INSERT_HEAD(&age_info->aged_aso,
-                                                        act, next);
-                                       MLX5_AGE_SET(age_info,
-                                                    MLX5_AGE_EVENT_NEW);
-                               }
-                               rte_spinlock_unlock(&age_info->aged_sl);
-                       }
-               }
-       }
-       mlx5_age_event_prepare(sh);
-}
-
-/**
- * Handle completions from WQEs sent to ASO SQ.
- *
- * @param[in] sh
- *   Shared device context.
- *
- * @return
- *   Number of CQEs handled.
- */
-static uint16_t
-mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
-{
-       struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
-       struct mlx5_aso_sq *sq = &mng->aso_sq;
-       struct mlx5_aso_cq *cq = &sq->cq;
-       volatile struct mlx5_cqe *restrict cqe;
-       const unsigned int cq_size = 1 << cq->log_desc_n;
-       const unsigned int mask = cq_size - 1;
-       uint32_t idx;
-       uint32_t next_idx = cq->cq_ci & mask;
-       const uint16_t max = (uint16_t)(sq->head - sq->tail);
-       uint16_t i = 0;
-       int ret;
-       if (unlikely(!max))
-               return 0;
-       do {
-               idx = next_idx;
-               next_idx = (cq->cq_ci + 1) & mask;
-               rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
-               cqe = &cq->cq_obj.cqes[idx];
-               ret = check_cqe(cqe, cq_size, cq->cq_ci);
-               /*
-                * Be sure owner read is done before any other cookie field or
-                * opaque field.
-                */
-               rte_io_rmb();
-               if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
-                       if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
-                               break;
-                       mlx5_aso_cqe_err_handle(sq);
-               } else {
-                       i += sq->elts[(sq->tail + i) & mask].burst_size;
-               }
-               cq->cq_ci++;
-       } while (1);
-       if (likely(i)) {
-               mlx5_aso_age_action_update(sh, i);
-               sq->tail += i;
-               rte_io_wmb();
-               cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
-       }
-       return i;
-}
-
-/**
- * Periodically read CQEs and send WQEs to ASO SQ.
- *
- * @param[in] arg
- *   Shared device context containing the ASO SQ.
- */
-static void
-mlx5_flow_aso_alarm(void *arg)
-{
-       struct mlx5_dev_ctx_shared *sh = arg;
-       struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
-       uint32_t us = 100u;
-       uint16_t n;
-
-       rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
-       n = sh->aso_age_mng->next;
-       rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
-       mlx5_aso_completion_handle(sh);
-       if (sq->next == n) {
-               /* End of loop: wait 1 second. */
-               us = US_PER_S;
-               sq->next = 0;
-       }
-       mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
-       if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
-               DRV_LOG(ERR, "Cannot reinitialize aso alarm.");
-}
-
-/**
- * API to start ASO access using ASO SQ.
- *
- * @param[in] sh
- *   Pointer to shared device context.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_aso_queue_start(struct mlx5_dev_ctx_shared *sh)
-{
-       if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
-               DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
-               return -rte_errno;
-       }
-       return 0;
-}
-
-/**
- * API to stop ASO access using ASO SQ.
- *
- * @param[in] sh
- *   Pointer to shared device context.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_aso_queue_stop(struct mlx5_dev_ctx_shared *sh)
-{
-       int retries = 1024;
-
-       if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
-               return -EINVAL;
-       rte_errno = 0;
-       while (--retries) {
-               rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
-               if (rte_errno != EINPROGRESS)
-                       break;
-               rte_pause();
-       }
-       return -rte_errno;
-}
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
new file mode 100644
index 0000000..067471b
--- /dev/null
@@ -0,0 +1,659 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 Mellanox Technologies, Ltd
+ */
+#include <mlx5_prm.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_eal_paging.h>
+
+#include <mlx5_malloc.h>
+#include <mlx5_common_os.h>
+#include <mlx5_common_devx.h>
+
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+
+/**
+ * Destroy Completion Queue used for ASO access.
+ *
+ * @param[in] cq
+ *   ASO CQ to destroy.
+ */
+static void
+mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
+{
+       if (cq->cq_obj.cq)
+               mlx5_devx_cq_destroy(&cq->cq_obj);
+       memset(cq, 0, sizeof(*cq));
+}
+
+/**
+ * Create Completion Queue used for ASO access.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in/out] cq
+ *   Pointer to CQ to create.
+ * @param[in] log_desc_n
+ *   Log of number of descriptors in queue.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ * @param[in] uar_page_id
+ *   UAR page ID to use.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
+                  int socket, int uar_page_id)
+{
+       struct mlx5_devx_cq_attr attr = {
+               .uar_page_id = uar_page_id,
+       };
+
+       cq->log_desc_n = log_desc_n;
+       cq->cq_ci = 0;
+       return mlx5_devx_cq_create(ctx, &cq->cq_obj, log_desc_n, &attr, socket);
+}
+
+/**
+ * Free MR resources.
+ *
+ * @param[in] mr
+ *   MR to free.
+ */
+static void
+mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
+{
+       claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
+       if (!mr->is_indirect && mr->umem)
+               claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
+       mlx5_free(mr->buf);
+       memset(mr, 0, sizeof(*mr));
+}
+
+/**
+ * Register Memory Region.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in] length
+ *   Size of MR buffer.
+ * @param[in/out] mr
+ *   Pointer to MR to create.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ * @param[in] pdn
+ *   Protection Domain number to use.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
+                    int socket, int pdn)
+{
+       struct mlx5_devx_mkey_attr mkey_attr;
+
+       mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
+                             socket);
+       if (!mr->buf) {
+               DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
+               return -1;
+       }
+       mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
+                                                IBV_ACCESS_LOCAL_WRITE);
+       if (!mr->umem) {
+               DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
+               goto error;
+       }
+       mkey_attr.addr = (uintptr_t)mr->buf;
+       mkey_attr.size = length;
+       mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
+       mkey_attr.pd = pdn;
+       mkey_attr.pg_access = 1;
+       mkey_attr.klm_array = NULL;
+       mkey_attr.klm_num = 0;
+       mkey_attr.relaxed_ordering_read = 0;
+       mkey_attr.relaxed_ordering_write = 0;
+       mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
+       if (!mr->mkey) {
+               DRV_LOG(ERR, "Failed to create direct Mkey.");
+               goto error;
+       }
+       mr->length = length;
+       mr->is_indirect = false;
+       return 0;
+error:
+       if (mr->umem)
+               claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
+       mlx5_free(mr->buf);
+       return -1;
+}
+
+/**
+ * Destroy Send Queue used for ASO access.
+ *
+ * @param[in] sq
+ *   ASO SQ to destroy.
+ */
+static void
+mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
+{
+       mlx5_devx_sq_destroy(&sq->sq_obj);
+       mlx5_aso_cq_destroy(&sq->cq);
+       memset(sq, 0, sizeof(*sq));
+}
+
+/**
+ * Initialize Send Queue used for ASO access.
+ *
+ * @param[in] sq
+ *   ASO SQ to initialize.
+ */
+static void
+mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
+{
+       volatile struct mlx5_aso_wqe *restrict wqe;
+       int i;
+       int size = 1 << sq->log_desc_n;
+       uint64_t addr;
+
+       /* All the following fields should remain constant. */
+       for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
+               wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
+                                                         (sizeof(*wqe) >> 4));
+               wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
+               addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
+                                           MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
+               wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
+               wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
+               wqe->aso_cseg.operand_masks = rte_cpu_to_be_32
+                       (0u |
+                        (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
+                        (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
+                        (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
+                        (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
+               wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);
+       }
+}
+
+/**
+ * Initialize Send Queue used for ASO flow meter access.
+ *
+ * @param[in] sq
+ *   ASO SQ to initialize.
+ */
+static void
+mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
+{
+       volatile struct mlx5_aso_wqe *restrict wqe;
+       int i;
+       int size = 1 << sq->log_desc_n;
+       uint32_t idx;
+
+       /* All the following fields should remain constant. */
+       for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
+               wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
+                                                         (sizeof(*wqe) >> 4));
+               wqe->aso_cseg.operand_masks = RTE_BE32(0u |
+                        (ASO_OPER_LOGICAL_OR << ASO_CSEG_COND_OPER_OFFSET) |
+                        (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_1_OPER_OFFSET) |
+                        (ASO_OP_ALWAYS_TRUE << ASO_CSEG_COND_0_OPER_OFFSET) |
+                        (BYTEWISE_64BYTE << ASO_CSEG_DATA_MASK_MODE_OFFSET));
+               wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+                                                        MLX5_COMP_MODE_OFFSET);
+               for (idx = 0; idx < MLX5_ASO_METERS_PER_WQE;
+                       idx++)
+                       wqe->aso_dseg.mtrs[idx].v_bo_sc_bbog_mm =
+                               RTE_BE32((1 << ASO_DSEG_VALID_OFFSET) |
+                               (MLX5_FLOW_COLOR_GREEN << ASO_DSEG_SC_OFFSET));
+       }
+}
+
+/**
+ * Create Send Queue used for ASO access.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in/out] sq
+ *   Pointer to SQ to create.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ * @param[in] uar
+ *   User Access Region object.
+ * @param[in] pdn
+ *   Protection Domain number to use.
+ * @param[in] log_desc_n
+ *   Log of number of descriptors in queue.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
+                  void *uar, uint32_t pdn,  uint16_t log_desc_n,
+                  uint32_t ts_format)
+{
+       struct mlx5_devx_create_sq_attr attr = {
+               .user_index = 0xFFFF,
+               .wq_attr = (struct mlx5_devx_wq_attr){
+                       .pd = pdn,
+                       .uar_page = mlx5_os_get_devx_uar_page_id(uar),
+               },
+               .ts_format = mlx5_ts_format_conv(ts_format),
+       };
+       struct mlx5_devx_modify_sq_attr modify_attr = {
+               .state = MLX5_SQC_STATE_RDY,
+       };
+       uint16_t log_wqbb_n;
+       int ret;
+
+       if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
+                              mlx5_os_get_devx_uar_page_id(uar)))
+               goto error;
+       sq->log_desc_n = log_desc_n;
+       attr.cqn = sq->cq.cq_obj.cq->id;
+       /* The mlx5_aso_wqe is twice the size of mlx5_wqe. */
+       log_wqbb_n = log_desc_n + 1;
+       ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
+       if (ret) {
+               DRV_LOG(ERR, "Can't create SQ object.");
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       ret = mlx5_devx_cmd_modify_sq(sq->sq_obj.sq, &modify_attr);
+       if (ret) {
+               DRV_LOG(ERR, "Can't change SQ state to ready.");
+               rte_errno = ENOMEM;
+               goto error;
+       }
+       sq->pi = 0;
+       sq->head = 0;
+       sq->tail = 0;
+       sq->sqn = sq->sq_obj.sq->id;
+       sq->uar_addr = mlx5_os_get_devx_uar_reg_addr(uar);
+       return 0;
+error:
+       mlx5_aso_destroy_sq(sq);
+       return -1;
+}
+
+/**
+ * API to create and initialize Send Queue used for ASO access.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ * @param[in] aso_opc_mod
+ *   Mode of the ASO operation (flow hit or policer).
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
+                       enum mlx5_access_aso_opc_mod aso_opc_mod)
+{
+       uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+
+       switch (aso_opc_mod) {
+       case ASO_OPC_MOD_FLOW_HIT:
+               if (mlx5_aso_devx_reg_mr(sh->ctx,
+                       (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+                       sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0, sh->pdn))
+                       return -1;
+               if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
+                                 sh->sq_ts_format)) {
+                       mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
+                       return -1;
+               }
+               mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
+               break;
+       case ASO_OPC_MOD_POLICER:
+               if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->sq, 0,
+                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
+                                 sh->sq_ts_format))
+                       return -1;
+               mlx5_aso_mtr_init_sq(&sh->mtrmng->sq);
+               break;
+       default:
+               DRV_LOG(ERR, "Unknown ASO operation mode");
+               return -1;
+       }
+       return 0;
+}
+
+/**
+ * API to destroy Send Queue used for ASO access.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ * @param[in] aso_opc_mod
+ *   Mode of the ASO operation (flow hit or policer).
+ */
+void
+mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh,
+                               enum mlx5_access_aso_opc_mod aso_opc_mod)
+{
+       struct mlx5_aso_sq *sq;
+
+       switch (aso_opc_mod) {
+       case ASO_OPC_MOD_FLOW_HIT:
+               mlx5_aso_devx_dereg_mr(&sh->aso_age_mng->aso_sq.mr);
+               sq = &sh->aso_age_mng->aso_sq;
+               break;
+       case ASO_OPC_MOD_POLICER:
+               sq = &sh->mtrmng->sq;
+               break;
+       default:
+               DRV_LOG(ERR, "Unknown ASO operation mode");
+               return;
+       }
+       mlx5_aso_destroy_sq(sq);
+}
+
+/**
+ * Write a burst of WQEs to ASO SQ.
+ *
+ * @param[in] mng
+ *   ASO management data, contains the SQ.
+ * @param[in] n
+ *   Index of the last valid pool.
+ *
+ * @return
+ *   Number of WQEs in burst.
+ */
+static uint16_t
+mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, uint16_t n)
+{
+       volatile struct mlx5_aso_wqe *wqe;
+       struct mlx5_aso_sq *sq = &mng->aso_sq;
+       struct mlx5_aso_age_pool *pool;
+       uint16_t size = 1 << sq->log_desc_n;
+       uint16_t mask = size - 1;
+       uint16_t max;
+       uint16_t start_head = sq->head;
+
+       max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
+       if (unlikely(!max))
+               return 0;
+       sq->elts[start_head & mask].burst_size = max;
+       do {
+               wqe = &sq->sq_obj.aso_wqes[sq->head & mask];
+               rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]);
+               /* Fill next WQE. */
+               rte_spinlock_lock(&mng->resize_sl);
+               pool = mng->pools[sq->next];
+               rte_spinlock_unlock(&mng->resize_sl);
+               sq->elts[sq->head & mask].pool = pool;
+               wqe->general_cseg.misc =
+                               rte_cpu_to_be_32(((struct mlx5_devx_obj *)
+                                                (pool->flow_hit_aso_obj))->id);
+               wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
+                                                        MLX5_COMP_MODE_OFFSET);
+               wqe->general_cseg.opcode = rte_cpu_to_be_32
+                                               (MLX5_OPCODE_ACCESS_ASO |
+                                                (ASO_OPC_MOD_FLOW_HIT <<
+                                                 WQE_CSEG_OPC_MOD_OFFSET) |
+                                                (sq->pi <<
+                                                 WQE_CSEG_WQE_INDEX_OFFSET));
+               sq->pi += 2; /* Each WQE contains 2 WQEBB's. */
+               sq->head++;
+               sq->next++;
+               max--;
+       } while (max);
+       wqe->general_cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+                                                        MLX5_COMP_MODE_OFFSET);
+       rte_io_wmb();
+       sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
+       rte_wmb();
+       *sq->uar_addr = *(volatile uint64_t *)wqe; /* Assume 64 bit ARCH.*/
+       rte_wmb();
+       return sq->elts[start_head & mask].burst_size;
+}
+
+/**
+ * Debug utility function. Dump contents of error CQE and WQE.
+ *
+ * @param[in] cqe
+ *   Error CQE to dump.
+ * @param[in] wqe
+ *   Error WQE to dump.
+ */
+static void
+mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
+{
+       int i;
+
+       DRV_LOG(ERR, "Error cqe:");
+       for (i = 0; i < 16; i += 4)
+               DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
+                       cqe[i + 2], cqe[i + 3]);
+       DRV_LOG(ERR, "\nError wqe:");
+       for (i = 0; i < (int)sizeof(struct mlx5_aso_wqe) / 4; i += 4)
+               DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
+                       wqe[i + 2], wqe[i + 3]);
+}
+
+/**
+ * Handle case of error CQE.
+ *
+ * @param[in] sq
+ *   ASO SQ to use.
+ */
+static void
+mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
+{
+       struct mlx5_aso_cq *cq = &sq->cq;
+       uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);
+       volatile struct mlx5_err_cqe *cqe =
+                       (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx];
+
+       cq->errors++;
+       idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n);
+       mlx5_aso_dump_err_objs((volatile uint32_t *)cqe,
+                              (volatile uint32_t *)&sq->sq_obj.aso_wqes[idx]);
+}
+
+/**
+ * Update ASO objects upon completion.
+ *
+ * @param[in] sh
+ *   Shared device context.
+ * @param[in] n
+ *   Number of completed ASO objects.
+ */
+static void
+mlx5_aso_age_action_update(struct mlx5_dev_ctx_shared *sh, uint16_t n)
+{
+       struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
+       struct mlx5_aso_sq *sq = &mng->aso_sq;
+       struct mlx5_age_info *age_info;
+       const uint16_t size = 1 << sq->log_desc_n;
+       const uint16_t mask = size - 1;
+       const uint64_t curr = MLX5_CURR_TIME_SEC;
+       uint16_t expected = AGE_CANDIDATE;
+       uint16_t i;
+
+       for (i = 0; i < n; ++i) {
+               uint16_t idx = (sq->tail + i) & mask;
+               struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
+               uint64_t diff = curr - pool->time_of_last_age_check;
+               uint64_t *addr = sq->mr.buf;
+               int j;
+
+               addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;
+               pool->time_of_last_age_check = curr;
+               for (j = 0; j < MLX5_ASO_AGE_ACTIONS_PER_POOL; j++) {
+                       struct mlx5_aso_age_action *act = &pool->actions[j];
+                       struct mlx5_age_param *ap = &act->age_params;
+                       uint8_t byte;
+                       uint8_t offset;
+                       uint8_t *u8addr;
+                       uint8_t hit;
+
+                       if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+                                           AGE_CANDIDATE)
+                               continue;
+                       byte = 63 - (j / 8);
+                       offset = j % 8;
+                       u8addr = (uint8_t *)addr;
+                       hit = (u8addr[byte] >> offset) & 0x1;
+                       if (hit) {
+                               __atomic_store_n(&ap->sec_since_last_hit, 0,
+                                                __ATOMIC_RELAXED);
+                       } else {
+                               struct mlx5_priv *priv;
+
+                               __atomic_fetch_add(&ap->sec_since_last_hit,
+                                                  diff, __ATOMIC_RELAXED);
+                               /* If timeout passed add to aged-out list. */
+                               if (ap->sec_since_last_hit <= ap->timeout)
+                                       continue;
+                               priv =
+                               rte_eth_devices[ap->port_id].data->dev_private;
+                               age_info = GET_PORT_AGE_INFO(priv);
+                               rte_spinlock_lock(&age_info->aged_sl);
+                               if (__atomic_compare_exchange_n(&ap->state,
+                                                               &expected,
+                                                               AGE_TMOUT,
+                                                               false,
+                                                              __ATOMIC_RELAXED,
+                                                           __ATOMIC_RELAXED)) {
+                                       LIST_INSERT_HEAD(&age_info->aged_aso,
+                                                        act, next);
+                                       MLX5_AGE_SET(age_info,
+                                                    MLX5_AGE_EVENT_NEW);
+                               }
+                               rte_spinlock_unlock(&age_info->aged_sl);
+                       }
+               }
+       }
+       mlx5_age_event_prepare(sh);
+}
+
+/**
+ * Handle completions from WQEs sent to ASO SQ.
+ *
+ * @param[in] sh
+ *   Shared device context.
+ *
+ * @return
+ *   Number of CQEs handled.
+ */
+static uint16_t
+mlx5_aso_completion_handle(struct mlx5_dev_ctx_shared *sh)
+{
+       struct mlx5_aso_age_mng *mng = sh->aso_age_mng;
+       struct mlx5_aso_sq *sq = &mng->aso_sq;
+       struct mlx5_aso_cq *cq = &sq->cq;
+       volatile struct mlx5_cqe *restrict cqe;
+       const unsigned int cq_size = 1 << cq->log_desc_n;
+       const unsigned int mask = cq_size - 1;
+       uint32_t idx;
+       uint32_t next_idx = cq->cq_ci & mask;
+       const uint16_t max = (uint16_t)(sq->head - sq->tail);
+       uint16_t i = 0;
+       int ret;
+       if (unlikely(!max))
+               return 0;
+       do {
+               idx = next_idx;
+               next_idx = (cq->cq_ci + 1) & mask;
+               rte_prefetch0(&cq->cq_obj.cqes[next_idx]);
+               cqe = &cq->cq_obj.cqes[idx];
+               ret = check_cqe(cqe, cq_size, cq->cq_ci);
+               /*
+                * Be sure owner read is done before any other cookie field or
+                * opaque field.
+                */
+               rte_io_rmb();
+               if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+                       if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
+                               break;
+                       mlx5_aso_cqe_err_handle(sq);
+               } else {
+                       i += sq->elts[(sq->tail + i) & mask].burst_size;
+               }
+               cq->cq_ci++;
+       } while (1);
+       if (likely(i)) {
+               mlx5_aso_age_action_update(sh, i);
+               sq->tail += i;
+               rte_io_wmb();
+               cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
+       }
+       return i;
+}
+
+/**
+ * Periodically read CQEs and send WQEs to ASO SQ.
+ *
+ * @param[in] arg
+ *   Shared device context containing the ASO SQ.
+ */
+static void
+mlx5_flow_aso_alarm(void *arg)
+{
+       struct mlx5_dev_ctx_shared *sh = arg;
+       struct mlx5_aso_sq *sq = &sh->aso_age_mng->aso_sq;
+       uint32_t us = 100u;
+       uint16_t n;
+
+       rte_spinlock_lock(&sh->aso_age_mng->resize_sl);
+       n = sh->aso_age_mng->next;
+       rte_spinlock_unlock(&sh->aso_age_mng->resize_sl);
+       mlx5_aso_completion_handle(sh);
+       if (sq->next == n) {
+               /* End of loop: wait 1 second. */
+               us = US_PER_S;
+               sq->next = 0;
+       }
+       mlx5_aso_sq_enqueue_burst(sh->aso_age_mng, n);
+       if (rte_eal_alarm_set(us, mlx5_flow_aso_alarm, sh))
+               DRV_LOG(ERR, "Cannot reinitialize ASO alarm.");
+}
+
+/**
+ * API to start ASO access using ASO SQ.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh)
+{
+       if (rte_eal_alarm_set(US_PER_S, mlx5_flow_aso_alarm, sh)) {
+               DRV_LOG(ERR, "Cannot reinitialize ASO age alarm.");
+               return -rte_errno;
+       }
+       return 0;
+}
+
+/**
+ * API to stop ASO access using ASO SQ.
+ *
+ * @param[in] sh
+ *   Pointer to shared device context.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh)
+{
+       int retries = 1024;
+
+       if (!sh->aso_age_mng->aso_sq.sq_obj.sq)
+               return -EINVAL;
+       rte_errno = 0;
+       while (--retries) {
+               rte_eal_alarm_cancel(mlx5_flow_aso_alarm, sh);
+               if (rte_errno != EINPROGRESS)
+                       break;
+               rte_pause();
+       }
+       return -rte_errno;
+}
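
For reading the tail of mlx5_aso_sq_enqueue_burst above: it is the usual
mlx5 doorbell sequence, annotated here with the ordering each barrier
enforces (a reading aid for the code above, not new logic):

    rte_io_wmb();  /* WQE contents visible before the DB record update. */
    sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(sq->pi);
    rte_wmb();     /* DB record visible before the UAR kick. */
    /*
     * Ring the doorbell: copy the first 8 bytes of the last WQE to the
     * UAR register so HW starts fetching WQEs; assumes a 64-bit arch,
     * as the comment in the code notes.
     */
    *sq->uar_addr = *(volatile uint64_t *)wqe;
    rte_wmb();
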
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4b2a272..a621417 100644
@@ -5962,6 +5962,11 @@ flow_dv_mtr_container_resize(struct rte_eth_dev *dev)
                rte_errno = ENOMEM;
                return -ENOMEM;
        }
+       if (!mtrmng->n)
+               if (mlx5_aso_queue_init(priv->sh, ASO_OPC_MOD_POLICER)) {
+                       mlx5_free(pools);
+                       return -ENOMEM;
+               }
        if (old_pools)
                memcpy(pools, old_pools, mtrmng->n *
                                       sizeof(struct mlx5_aso_mtr_pool *));
@@ -10834,7 +10839,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
                mlx5_free(old_pools);
        } else {
                /* First ASO flow hit allocation - starting ASO data-path. */
-               int ret = mlx5_aso_queue_start(priv->sh);
+               int ret = mlx5_aso_flow_hit_queue_poll_start(priv->sh);
 
                if (ret) {
                        mlx5_free(pools);
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 956a6c3..ef4ca30 100644
@@ -811,7 +811,6 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
                        struct rte_mtr_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_aso_mtr_pools_mng *mtrmng = priv->sh->mtrmng;
        struct mlx5_flow_meter_info *fm;
        const struct rte_flow_attr attr = {
                                .ingress = 1,
@@ -836,7 +835,7 @@ mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
                                          RTE_MTR_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "Meter object is being used.");
        if (priv->sh->meter_aso_en) {
-               if (mlx5_l3t_clear_entry(mtrmng->mtr_idx_tbl, meter_id))
+               if (mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id))
                        return -rte_mtr_error_set(error, EBUSY,
                                RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
                                "Fail to delete ASO Meter in index table.");
@@ -1302,7 +1301,7 @@ mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
                        rte_spinlock_unlock(&mtrmng->mtrsl);
                        return NULL;
                }
-               if (mlx5_l3t_get_entry(mtrmng->mtr_idx_tbl, meter_id, &data) ||
+               if (mlx5_l3t_get_entry(priv->mtr_idx_tbl, meter_id, &data) ||
                        !data.dword) {
                        rte_spinlock_unlock(&mtrmng->mtrsl);
                        return NULL;
@@ -1310,7 +1309,7 @@ mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
                if (mtr_idx)
                        *mtr_idx = data.dword;
                aso_mtr = mlx5_aso_meter_by_idx(priv, data.dword);
-               mlx5_l3t_clear_entry(mtrmng->mtr_idx_tbl, meter_id);
+               mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id);
                if (meter_id == aso_mtr->fm.meter_id) {
                        rte_spinlock_unlock(&mtrmng->mtrsl);
                        return &aso_mtr->fm;
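
With the index table moved into struct mlx5_priv, meter ID lookup is scoped
to the port. A condensed sketch of the find path after this change,
mirroring mlx5_flow_meter_find above (the get/clear pair takes and releases
the L3T entry reference):

    union mlx5_l3t_data data;
    struct mlx5_aso_mtr *aso_mtr;

    if (mlx5_l3t_get_entry(priv->mtr_idx_tbl, meter_id, &data) ||
        !data.dword)
            return NULL;                /* Unknown meter ID. */
    if (mtr_idx)
            *mtr_idx = data.dword;
    aso_mtr = mlx5_aso_meter_by_idx(priv, data.dword);
    mlx5_l3t_clear_entry(priv->mtr_idx_tbl, meter_id);
    return &aso_mtr->fm;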