net/bnxt: support HCAPI interface
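
Rename the EM page-table and table enums with a TF_ prefix
(PT_LVL_x -> TF_PT_LVL_x, KEY0/KEY1/RECORD/EFC/MAX_TABLE ->
TF_KEY0_TABLE and friends) so the definitions can be shared with the
new HCAPI layer without name collisions. Also add a bulk table get
API, tf_get_bulk_tbl_entry(), for internal table types, and accept
TF_TBL_TYPE_MIRROR_CONFIG in the set and allocate paths.

A caller fills the bulk parameters and issues a single get. A minimal
sketch follows; the num_entries and physical_mem_addr fields are
assumptions based on tf_core.h and are not shown in this diff:

    struct tf_get_bulk_tbl_entry_parms bparms;
    int rc;

    memset(&bparms, 0, sizeof(bparms));
    bparms.dir = TF_DIR_RX;                  /* receive direction */
    bparms.type = TF_TBL_TYPE_ACT_STATS_64;  /* 64b stats table */
    bparms.starting_idx = base_idx;          /* first allocated entry */
    bparms.num_entries = cnt;                /* assumed field */
    bparms.physical_mem_addr = dma_addr;     /* assumed field: DMA dest */
    rc = tf_get_bulk_tbl_entry(tfp, &bparms);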
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index dda72c3..35a7cfa 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -15,6 +15,7 @@
 #include "hsi_struct_def_dpdk.h"
 
 #include "tf_core.h"
+#include "tf_util.h"
 #include "tf_em.h"
 #include "tf_msg.h"
 #include "tfp.h"
@@ -22,6 +23,7 @@
 #include "bnxt.h"
 #include "tf_resources.h"
 #include "tf_rm.h"
+#include "tf_common.h"
 
 #define PTU_PTE_VALID          0x1UL
 #define PTU_PTE_LAST           0x2UL
@@ -283,8 +285,8 @@ tf_em_setup_page_table(struct tf_em_table *tbl)
                tf_em_link_page_table(tp, tp_next, set_pte_last);
        }
 
-       tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];
-       tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];
+       tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
+       tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
 }
 
 /**
@@ -315,7 +317,7 @@ tf_em_size_page_tbl_lvl(uint32_t page_size,
                        uint64_t *num_data_pages)
 {
        uint64_t lvl_data_size = page_size;
-       int lvl = PT_LVL_0;
+       int lvl = TF_PT_LVL_0;
        uint64_t data_size;
 
        *num_data_pages = 0;
@@ -324,10 +326,10 @@ tf_em_size_page_tbl_lvl(uint32_t page_size,
        while (lvl_data_size < data_size) {
                lvl++;
 
-               if (lvl == PT_LVL_1)
+               if (lvl == TF_PT_LVL_1)
                        lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
                                page_size;
-               else if (lvl == PT_LVL_2)
+               else if (lvl == TF_PT_LVL_2)
                        lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
                                MAX_PAGE_PTRS(page_size) * page_size;
                else
@@ -384,18 +386,18 @@ tf_em_size_page_tbls(int max_lvl,
                     uint32_t page_size,
                     uint32_t *page_cnt)
 {
-       if (max_lvl == PT_LVL_0) {
-               page_cnt[PT_LVL_0] = num_data_pages;
-       } else if (max_lvl == PT_LVL_1) {
-               page_cnt[PT_LVL_1] = num_data_pages;
-               page_cnt[PT_LVL_0] =
-               tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
-       } else if (max_lvl == PT_LVL_2) {
-               page_cnt[PT_LVL_2] = num_data_pages;
-               page_cnt[PT_LVL_1] =
-               tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);
-               page_cnt[PT_LVL_0] =
-               tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
+       if (max_lvl == TF_PT_LVL_0) {
+               page_cnt[TF_PT_LVL_0] = num_data_pages;
+       } else if (max_lvl == TF_PT_LVL_1) {
+               page_cnt[TF_PT_LVL_1] = num_data_pages;
+               page_cnt[TF_PT_LVL_0] =
+               tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
+       } else if (max_lvl == TF_PT_LVL_2) {
+               page_cnt[TF_PT_LVL_2] = num_data_pages;
+               page_cnt[TF_PT_LVL_1] =
+               tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
+               page_cnt[TF_PT_LVL_0] =
+               tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
        } else {
                return;
        }
@@ -432,7 +434,7 @@ tf_em_size_table(struct tf_em_table *tbl)
        /* Determine number of page table levels and the number
         * of data pages needed to process the given eem table.
         */
-       if (tbl->type == RECORD_TABLE) {
+       if (tbl->type == TF_RECORD_TABLE) {
                /*
                 * For action records just a memory size is provided. Work
                 * backwards to resolve to number of entries
@@ -478,9 +480,9 @@ tf_em_size_table(struct tf_em_table *tbl)
                    max_lvl + 1,
                    (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
                    num_data_pages,
-                   page_cnt[PT_LVL_0],
-                   page_cnt[PT_LVL_1],
-                   page_cnt[PT_LVL_2]);
+                   page_cnt[TF_PT_LVL_0],
+                   page_cnt[TF_PT_LVL_1],
+                   page_cnt[TF_PT_LVL_2]);
 
        return 0;
 }
@@ -506,7 +508,7 @@ tf_em_ctx_unreg(struct tf *tfp,
        struct tf_em_table *tbl;
        int i;
 
-       for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
+       for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
                tbl = &ctxp->em_tables[i];
 
                if (tbl->num_entries != 0 && tbl->entry_size != 0) {
@@ -542,7 +544,7 @@ tf_em_ctx_reg(struct tf *tfp,
        int rc = 0;
        int i;
 
-       for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
+       for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
                tbl = &ctxp->em_tables[i];
 
                if (tbl->num_entries && tbl->entry_size) {
@@ -717,41 +719,41 @@ tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
                return -EINVAL;
        }
        /* Rx */
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
                parms->rx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
                parms->rx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
                parms->rx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
                parms->rx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
                parms->rx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
                parms->rx_max_action_entry_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
                0;
 
        /* Tx */
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
                parms->tx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
                parms->tx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
                parms->tx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
                parms->tx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
                parms->tx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
                parms->tx_max_action_entry_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
                0;
 
        return 0;
@@ -793,6 +795,7 @@ tf_set_tbl_entry_internal(struct tf *tfp,
 
        if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
            parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
+           parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
            parms->type != TF_TBL_TYPE_ACT_STATS_64) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Type not supported, type:%d\n",
@@ -914,6 +917,76 @@ tf_get_tbl_entry_internal(struct tf *tfp,
        return rc;
 }
 
+/**
+ * Internal function to get a bulk of Table Entries. Supports all
+ * Table Types except TF_TBL_TYPE_EXT, which is handled as a table
+ * scope.
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+static int
+tf_get_bulk_tbl_entry_internal(struct tf *tfp,
+                               struct tf_get_bulk_tbl_entry_parms *parms)
+{
+       int rc;
+       int id;
+       uint32_t index;
+       struct bitalloc *session_pool;
+       struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+       /* Lookup the pool using the table type of the element */
+       rc = tf_rm_lookup_tbl_type_pool(tfs,
+                                       parms->dir,
+                                       parms->type,
+                                       &session_pool);
+       /* Error logging handled by tf_rm_lookup_tbl_type_pool */
+       if (rc)
+               return rc;
+
+       index = parms->starting_idx;
+
+       /*
+        * Adjust the returned index/offset as there is no guarantee
+        * that the start is 0 at time of RM allocation
+        */
+       tf_rm_convert_index(tfs,
+                           parms->dir,
+                           parms->type,
+                           TF_RM_CONVERT_RM_BASE,
+                           parms->starting_idx,
+                           &index);
+
+       /* Verify that the entry has been previously allocated */
+       id = ba_inuse(session_pool, index);
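+       /* ba_inuse() reports whether this index was allocated from the
+        * session pool; anything other than 1 (in use) is rejected.
+        */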
+       if (id != 1) {
+               TFP_DRV_LOG(ERR,
+                  "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
+                  tf_dir_2_str(parms->dir),
+                  parms->type,
+                  parms->starting_idx);
+               return -EINVAL;
+       }
+
+       /* Issue the bulk get to firmware */
+       rc = tf_msg_get_bulk_tbl_entry(tfp, parms);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s, Bulk get failed, type:%d, rc:%s\n",
+                           tf_dir_2_str(parms->dir),
+                           parms->type,
+                           strerror(-rc));
+       }
+
+       return rc;
+}
+
 #if (TF_SHADOW == 1)
 /**
  * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
@@ -1181,6 +1254,7 @@ tf_alloc_tbl_entry_pool_internal(struct tf *tfp,
            parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
            parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
            parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
+           parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
            parms->type != TF_TBL_TYPE_ACT_STATS_64) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Type not supported, type:%d\n",
@@ -1498,11 +1572,11 @@ tf_alloc_eem_tbl_scope(struct tf *tfp,
 
                em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
                rc = tf_msg_em_cfg(tfp,
-                                  em_tables[KEY0_TABLE].num_entries,
-                                  em_tables[KEY0_TABLE].ctx_id,
-                                  em_tables[KEY1_TABLE].ctx_id,
-                                  em_tables[RECORD_TABLE].ctx_id,
-                                  em_tables[EFC_TABLE].ctx_id,
+                                  em_tables[TF_KEY0_TABLE].num_entries,
+                                  em_tables[TF_KEY0_TABLE].ctx_id,
+                                  em_tables[TF_KEY1_TABLE].ctx_id,
+                                  em_tables[TF_RECORD_TABLE].ctx_id,
+                                  em_tables[TF_EFC_TABLE].ctx_id,
                                   parms->hw_flow_cache_flush_timer,
                                   dir);
                if (rc) {
@@ -1526,9 +1600,9 @@ tf_alloc_eem_tbl_scope(struct tf *tfp,
                 * actions related to a single table scope.
                 */
                rc = tf_create_tbl_pool_external(dir,
-                                           tbl_scope_cb,
-                                           em_tables[RECORD_TABLE].num_entries,
-                                           em_tables[RECORD_TABLE].entry_size);
+                                   tbl_scope_cb,
+                                   em_tables[TF_RECORD_TABLE].num_entries,
+                                   em_tables[TF_RECORD_TABLE].entry_size);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "%d TBL: Unable to allocate idx pools %s\n",
@@ -1598,7 +1672,7 @@ tf_set_tbl_entry(struct tf *tfp,
                base_addr = tf_em_get_table_page(tbl_scope_cb,
                                                 parms->dir,
                                                 offset,
-                                                RECORD_TABLE);
+                                                TF_RECORD_TABLE);
                if (base_addr == NULL) {
                        PMD_DRV_LOG(ERR,
                                    "dir:%d, Base address lookup failed\n",
@@ -1662,6 +1736,36 @@ tf_get_tbl_entry(struct tf *tfp,
        return rc;
 }
 
+/* API defined in tf_core.h */
+int
+tf_get_bulk_tbl_entry(struct tf *tfp,
+                      struct tf_get_bulk_tbl_entry_parms *parms)
+{
+       int rc = 0;
+
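+       /* TF_CHECK_PARMS_SESSION() from tf_common.h is assumed to bail
+        * out with -EINVAL when tfp, parms or the session is NULL.
+        */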
+       TF_CHECK_PARMS_SESSION(tfp, parms);
+
+       if (parms->type == TF_TBL_TYPE_EXT) {
+               /* Not supported, yet */
+               TFP_DRV_LOG(ERR,
+                           "%s, External table type not supported\n",
+                           tf_dir_2_str(parms->dir));
+
+               rc = -EOPNOTSUPP;
+       } else {
+               /* Internal table type processing */
+               rc = tf_get_bulk_tbl_entry_internal(tfp, parms);
+               if (rc)
+                       TFP_DRV_LOG(ERR,
+                                   "%s, Bulk get failed, type:%d, rc:%s\n",
+                                   tf_dir_2_str(parms->dir),
+                                   parms->type,
+                                   strerror(-rc));
+       }
+
+       return rc;
+}
+
 /* API defined in tf_core.h */
 int
 tf_alloc_tbl_scope(struct tf *tfp,
@@ -1863,12 +1967,12 @@ void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
        tbl_scope_cb = tbl_scope_cb_find(session,
                                         tbl_scope_id);
-       if (tbl_scope_cb == NULL)
-               PMD_DRV_LOG(ERR, "No table scope\n");
+       if (tbl_scope_cb == NULL) {
+               TFP_DRV_LOG(ERR, "No table scope\n");
+               /* Do not dereference a NULL control block below */
+               return;
+       }
 
        for (dir = 0; dir < TF_DIR_MAX; dir++) {
                printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
 
-               for (j = KEY0_TABLE; j < MAX_TABLE; j++) {
+               for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
                        tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
                        printf
        ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",