net/bnxt: support HCAPI interface
[dpdk.git] / drivers / net / bnxt / tf_core / tf_tbl.c
index 236affe..35a7cfa 100644 (file)
@@ -15,6 +15,7 @@
 #include "hsi_struct_def_dpdk.h"
 
 #include "tf_core.h"
+#include "tf_util.h"
 #include "tf_em.h"
 #include "tf_msg.h"
 #include "tfp.h"
@@ -22,6 +23,7 @@
 #include "bnxt.h"
 #include "tf_resources.h"
 #include "tf_rm.h"
+#include "tf_common.h"
 
 #define PTU_PTE_VALID          0x1UL
 #define PTU_PTE_LAST           0x2UL
@@ -156,7 +158,7 @@ tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,
                if (tfp_calloc(&parms) != 0)
                        goto cleanup;
 
-               tp->pg_pa_tbl[i] = (uint64_t)(uintptr_t)parms.mem_pa;
+               tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
                tp->pg_va_tbl[i] = parms.mem_va;
 
                memset(tp->pg_va_tbl[i], 0, pg_size);
@@ -283,8 +285,8 @@ tf_em_setup_page_table(struct tf_em_table *tbl)
                tf_em_link_page_table(tp, tp_next, set_pte_last);
        }
 
-       tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];
-       tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];
+       tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
+       tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
 }
 
 /**
@@ -315,7 +317,7 @@ tf_em_size_page_tbl_lvl(uint32_t page_size,
                        uint64_t *num_data_pages)
 {
        uint64_t lvl_data_size = page_size;
-       int lvl = PT_LVL_0;
+       int lvl = TF_PT_LVL_0;
        uint64_t data_size;
 
        *num_data_pages = 0;
@@ -324,10 +326,10 @@ tf_em_size_page_tbl_lvl(uint32_t page_size,
        while (lvl_data_size < data_size) {
                lvl++;
 
-               if (lvl == PT_LVL_1)
+               if (lvl == TF_PT_LVL_1)
                        lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
                                page_size;
-               else if (lvl == PT_LVL_2)
+               else if (lvl == TF_PT_LVL_2)
                        lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
                                MAX_PAGE_PTRS(page_size) * page_size;
                else
@@ -384,18 +386,18 @@ tf_em_size_page_tbls(int max_lvl,
                     uint32_t page_size,
                     uint32_t *page_cnt)
 {
-       if (max_lvl == PT_LVL_0) {
-               page_cnt[PT_LVL_0] = num_data_pages;
-       } else if (max_lvl == PT_LVL_1) {
-               page_cnt[PT_LVL_1] = num_data_pages;
-               page_cnt[PT_LVL_0] =
-               tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
-       } else if (max_lvl == PT_LVL_2) {
-               page_cnt[PT_LVL_2] = num_data_pages;
-               page_cnt[PT_LVL_1] =
-               tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);
-               page_cnt[PT_LVL_0] =
-               tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
+       if (max_lvl == TF_PT_LVL_0) {
+               page_cnt[TF_PT_LVL_0] = num_data_pages;
+       } else if (max_lvl == TF_PT_LVL_1) {
+               page_cnt[TF_PT_LVL_1] = num_data_pages;
+               page_cnt[TF_PT_LVL_0] =
+               tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
+       } else if (max_lvl == TF_PT_LVL_2) {
+               page_cnt[TF_PT_LVL_2] = num_data_pages;
+               page_cnt[TF_PT_LVL_1] =
+               tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
+               page_cnt[TF_PT_LVL_0] =
+               tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
        } else {
                return;
        }
@@ -432,7 +434,7 @@ tf_em_size_table(struct tf_em_table *tbl)
        /* Determine number of page table levels and the number
         * of data pages needed to process the given eem table.
         */
-       if (tbl->type == RECORD_TABLE) {
+       if (tbl->type == TF_RECORD_TABLE) {
                /*
                 * For action records just a memory size is provided. Work
                 * backwards to resolve to number of entries
@@ -478,9 +480,9 @@ tf_em_size_table(struct tf_em_table *tbl)
                    max_lvl + 1,
                    (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
                    num_data_pages,
-                   page_cnt[PT_LVL_0],
-                   page_cnt[PT_LVL_1],
-                   page_cnt[PT_LVL_2]);
+                   page_cnt[TF_PT_LVL_0],
+                   page_cnt[TF_PT_LVL_1],
+                   page_cnt[TF_PT_LVL_2]);
 
        return 0;
 }
@@ -506,7 +508,7 @@ tf_em_ctx_unreg(struct tf *tfp,
        struct tf_em_table *tbl;
        int i;
 
-       for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
+       for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
                tbl = &ctxp->em_tables[i];
 
                if (tbl->num_entries != 0 && tbl->entry_size != 0) {
@@ -539,10 +541,10 @@ tf_em_ctx_reg(struct tf *tfp,
 {
        struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
        struct tf_em_table *tbl;
-       int rc;
+       int rc = 0;
        int i;
 
-       for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
+       for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
                tbl = &ctxp->em_tables[i];
 
                if (tbl->num_entries && tbl->entry_size) {
@@ -717,41 +719,41 @@ tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
                return -EINVAL;
        }
        /* Rx */
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
                parms->rx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
                parms->rx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
                parms->rx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
                parms->rx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
                parms->rx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
                parms->rx_max_action_entry_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
                0;
 
        /* Tx */
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
                parms->tx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
                parms->tx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
                parms->tx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
                parms->tx_max_key_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
                parms->tx_num_flows_in_k * TF_KILOBYTE;
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
                parms->tx_max_action_entry_sz_in_bits / 8;
 
-       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =
+       tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
                0;
 
        return 0;
@@ -792,7 +794,9 @@ tf_set_tbl_entry_internal(struct tf *tfp,
        index = parms->idx;
 
        if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
-           parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4) {
+           parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
+           parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
+           parms->type != TF_TBL_TYPE_ACT_STATS_64) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Type not supported, type:%d\n",
                            parms->dir,
@@ -913,6 +917,76 @@ tf_get_tbl_entry_internal(struct tf *tfp,
        return rc;
 }
 
+/**
+ * Internal function to bulk get Table Entries. Supports all Table
+ * Types except the TF_TBL_TYPE_EXT as that is handled as a table
+ * scope.
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+static int
+tf_get_bulk_tbl_entry_internal(struct tf *tfp,
+                         struct tf_get_bulk_tbl_entry_parms *parms)
+{
+       int rc;
+       int id;
+       uint32_t index;
+       struct bitalloc *session_pool;
+       struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
+
+       /* Lookup the pool using the table type of the element */
+       rc = tf_rm_lookup_tbl_type_pool(tfs,
+                                       parms->dir,
+                                       parms->type,
+                                       &session_pool);
+       /* Error logging handled by tf_rm_lookup_tbl_type_pool */
+       if (rc)
+               return rc;
+
+       index = parms->starting_idx;
+
+       /*
+        * Adjust the returned index/offset as there is no guarantee
+        * that the start is 0 at time of RM allocation
+        */
+       tf_rm_convert_index(tfs,
+                           parms->dir,
+                           parms->type,
+                           TF_RM_CONVERT_RM_BASE,
+                           parms->starting_idx,
+                           &index);
+
+       /* Verify that the entry has been previously allocated */
+       /* NOTE(review): only the starting index of the bulk range is
+        * checked via ba_inuse(); entries beyond starting_idx are
+        * assumed allocated -- TODO confirm this is intentional.
+        */
+       id = ba_inuse(session_pool, index);
+       if (id != 1) {
+               TFP_DRV_LOG(ERR,
+                  "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
+                  tf_dir_2_str(parms->dir),
+                  parms->type,
+                  index);
+               return -EINVAL;
+       }
+
+       /* Get the entry */
+       /* Delegate the actual bulk read to the firmware message layer;
+        * rc is returned to the caller unchanged on failure.
+        */
+       rc = tf_msg_get_bulk_tbl_entry(tfp, parms);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s, Bulk get failed, type:%d, rc:%s\n",
+                           tf_dir_2_str(parms->dir),
+                           parms->type,
+                           strerror(-rc));
+       }
+
+       return rc;
+}
+
 #if (TF_SHADOW == 1)
 /**
  * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
@@ -1179,7 +1253,9 @@ tf_alloc_tbl_entry_pool_internal(struct tf *tfp,
            parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
            parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
            parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
-           parms->type != TF_TBL_TYPE_ACT_ENCAP_64B) {
+           parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
+           parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
+           parms->type != TF_TBL_TYPE_ACT_STATS_64) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Type not supported, type:%d\n",
                            parms->dir,
@@ -1330,7 +1406,8 @@ tf_free_tbl_entry_pool_internal(struct tf *tfp,
            parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
            parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
            parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
-           parms->type != TF_TBL_TYPE_ACT_ENCAP_64B) {
+           parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
+           parms->type != TF_TBL_TYPE_ACT_STATS_64) {
                PMD_DRV_LOG(ERR,
                            "dir:%d, Type not supported, type:%d\n",
                            parms->dir,
@@ -1495,11 +1572,12 @@ tf_alloc_eem_tbl_scope(struct tf *tfp,
 
                em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
                rc = tf_msg_em_cfg(tfp,
-                                  em_tables[KEY0_TABLE].num_entries,
-                                  em_tables[KEY0_TABLE].ctx_id,
-                                  em_tables[KEY1_TABLE].ctx_id,
-                                  em_tables[RECORD_TABLE].ctx_id,
-                                  em_tables[EFC_TABLE].ctx_id,
+                                  em_tables[TF_KEY0_TABLE].num_entries,
+                                  em_tables[TF_KEY0_TABLE].ctx_id,
+                                  em_tables[TF_KEY1_TABLE].ctx_id,
+                                  em_tables[TF_RECORD_TABLE].ctx_id,
+                                  em_tables[TF_EFC_TABLE].ctx_id,
+                                  parms->hw_flow_cache_flush_timer,
                                   dir);
                if (rc) {
                        PMD_DRV_LOG(ERR,
@@ -1522,9 +1600,9 @@ tf_alloc_eem_tbl_scope(struct tf *tfp,
                 * actions related to a single table scope.
                 */
                rc = tf_create_tbl_pool_external(dir,
-                                           tbl_scope_cb,
-                                           em_tables[RECORD_TABLE].num_entries,
-                                           em_tables[RECORD_TABLE].entry_size);
+                                   tbl_scope_cb,
+                                   em_tables[TF_RECORD_TABLE].num_entries,
+                                   em_tables[TF_RECORD_TABLE].entry_size);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "%d TBL: Unable to allocate idx pools %s\n",
@@ -1594,7 +1672,7 @@ tf_set_tbl_entry(struct tf *tfp,
                base_addr = tf_em_get_table_page(tbl_scope_cb,
                                                 parms->dir,
                                                 offset,
-                                                RECORD_TABLE);
+                                                TF_RECORD_TABLE);
                if (base_addr == NULL) {
                        PMD_DRV_LOG(ERR,
                                    "dir:%d, Base address lookup failed\n",
@@ -1658,6 +1736,36 @@ tf_get_tbl_entry(struct tf *tfp,
        return rc;
 }
 
+/* API defined in tf_core.h */
+/*
+ * Public entry point for bulk table-entry reads. Validates the
+ * session/parms handles, rejects external (EEM) table types, and
+ * dispatches internal types to tf_get_bulk_tbl_entry_internal().
+ *
+ * Returns 0 on success, -EOPNOTSUPP for TF_TBL_TYPE_EXT, or a
+ * negative errno propagated from the internal path.
+ */
+int
+tf_get_bulk_tbl_entry(struct tf *tfp,
+                struct tf_get_bulk_tbl_entry_parms *parms)
+{
+       int rc = 0;
+
+       /* Macro bails out (returns -EINVAL) on NULL tfp/parms/session */
+       TF_CHECK_PARMS_SESSION(tfp, parms);
+
+       if (parms->type == TF_TBL_TYPE_EXT) {
+               /* Not supported, yet */
+               TFP_DRV_LOG(ERR,
+                           "%s, External table type not supported\n",
+                           tf_dir_2_str(parms->dir));
+
+               rc = -EOPNOTSUPP;
+       } else {
+               /* Internal table type processing */
+               rc = tf_get_bulk_tbl_entry_internal(tfp, parms);
+               if (rc)
+                       TFP_DRV_LOG(ERR,
+                                   "%s, Bulk get failed, type:%d, rc:%s\n",
+                                   tf_dir_2_str(parms->dir),
+                                   parms->type,
+                                   strerror(-rc));
+       }
+
+       return rc;
+}
+
 /* API defined in tf_core.h */
 int
 tf_alloc_tbl_scope(struct tf *tfp,
@@ -1800,3 +1908,91 @@ tf_free_tbl_entry(struct tf *tfp,
                            rc);
        return rc;
 }
+
+
+/*
+ * Debug helper: print one level of an EEM page table.
+ *
+ * Walks every page pointer in @tp, printing each PTE, and stops at
+ * the first entry whose low flag bits mark it as the last valid PTE
+ * (PTU_PTE_LAST | PTU_PTE_VALID), or at the first invalid entry.
+ * @tp_next is the next (deeper) level; its pg_count bounds how many
+ * PTEs are expected, so exceeding it flags table corruption.
+ */
+static void
+tf_dump_link_page_table(struct tf_em_page_tbl *tp,
+                       struct tf_em_page_tbl *tp_next)
+{
+       uint64_t *pg_va;
+       uint32_t i;
+       uint32_t j;
+       /* k counts PTEs printed across all pages of this level */
+       uint32_t k = 0;
+
+       printf("pg_count:%d pg_size:0x%x\n",
+              tp->pg_count,
+              tp->pg_size);
+       for (i = 0; i < tp->pg_count; i++) {
+               pg_va = tp->pg_va_tbl[i];
+               printf("\t%p\n", (void *)pg_va);
+               for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
+                       printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
+                       /* Low 3 bits of a PTE hold the VALID/LAST flags */
+                       if (((pg_va[j] & 0x7) ==
+                            tfp_cpu_to_le_64(PTU_PTE_LAST |
+                                             PTU_PTE_VALID)))
+                               return;
+
+                       if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
+                               printf("** Invalid entry **\n");
+                               return;
+                       }
+
+                       /* More valid PTEs than pages in the next level
+                        * means the linkage is inconsistent
+                        */
+                       if (++k >= tp_next->pg_count) {
+                               printf("** Shouldn't get here **\n");
+                               return;
+                       }
+               }
+       }
+}
+
+/*
+ * Debug helper: dump the EEM DMA page tables for a table scope.
+ *
+ * For each direction (Rx/Tx) and each EM table (KEY0..MAX) prints the
+ * table geometry and walks every page-table level pair with
+ * tf_dump_link_page_table().
+ *
+ * [in] tfp          Pointer to TruFlow handle
+ * [in] tbl_scope_id Table scope identifier to dump
+ */
+void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
+
+void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
+{
+       struct tf_session      *session;
+       struct tf_tbl_scope_cb *tbl_scope_cb;
+       struct tf_em_page_tbl *tp;
+       struct tf_em_page_tbl *tp_next;
+       struct tf_em_table *tbl;
+       int i;
+       int j;
+       int dir;
+
+       printf("called %s\n", __func__);
+
+       /* find session struct */
+       session = (struct tf_session *)tfp->session->core_data;
+
+       /* find control block for table scope */
+       tbl_scope_cb = tbl_scope_cb_find(session,
+                                        tbl_scope_id);
+       if (tbl_scope_cb == NULL) {
+               TFP_DRV_LOG(ERR, "No table scope\n");
+               /* Fix: must bail out here -- the loop below dereferences
+                * tbl_scope_cb, so continuing would crash on NULL.
+                */
+               return;
+       }
+
+       for (dir = 0; dir < TF_DIR_MAX; dir++) {
+               printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
+
+               for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
+                       tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
+                       printf
+       ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
+                              j,
+                              tbl->type,
+                              tbl->num_entries,
+                              tbl->entry_size,
+                              tbl->num_lvl);
+                       /* Level-0 root page, if the table was registered */
+                       if (tbl->pg_tbl[0].pg_va_tbl &&
+                           tbl->pg_tbl[0].pg_pa_tbl)
+                               printf("%p %p\n",
+                              tbl->pg_tbl[0].pg_va_tbl[0],
+                              (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
+                       /* Walk adjacent level pairs (0->1, 1->2, ...) */
+                       for (i = 0; i < tbl->num_lvl - 1; i++) {
+                               printf("Level:%d\n", i);
+                               tp = &tbl->pg_tbl[i];
+                               tp_next = &tbl->pg_tbl[i + 1];
+                               tf_dump_link_page_table(tp, tp_next);
+                       }
+                       printf("\n");
+               }
+       }
+}