1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
6 /* Truflow Table APIs and supporting code */
12 #include <sys/param.h>
13 #include <rte_common.h>
14 #include <rte_errno.h>
15 #include "hsi_struct_def_dpdk.h"
24 #include "tf_resources.h"
27 #include "tf_common.h"
29 #define PTU_PTE_VALID 0x1UL
30 #define PTU_PTE_LAST 0x2UL
31 #define PTU_PTE_NEXT_TO_LAST 0x4UL
33 /* Number of pointers per page_size */
34 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
36 #define TF_EM_PG_SZ_4K (1 << 12)
37 #define TF_EM_PG_SZ_8K (1 << 13)
38 #define TF_EM_PG_SZ_64K (1 << 16)
39 #define TF_EM_PG_SZ_256K (1 << 18)
40 #define TF_EM_PG_SZ_1M (1 << 20)
41 #define TF_EM_PG_SZ_2M (1 << 21)
42 #define TF_EM_PG_SZ_4M (1 << 22)
43 #define TF_EM_PG_SZ_1G (1 << 30)
45 #define TF_EM_CTX_ID_INVALID 0xFFFF
47 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
48 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
51 * Function to free a page table
54 * Pointer to the page table to free
/* Releases every data page in the table, then the VA and PA pointer
 * arrays themselves.  Pages with no VA mapping are logged and skipped.
 */
57 tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
61 for (i = 0; i < tp->pg_count; i++) {
62 if (!tp->pg_va_tbl[i]) {
64 "No mapping for page: %d table: %016" PRIu64 "\n",
66 (uint64_t)(uintptr_t)tp);
70 tfp_free(tp->pg_va_tbl[i]);
/* Clear the slot so a repeated free cannot double-free this page. */
71 tp->pg_va_tbl[i] = NULL;
75 tfp_free(tp->pg_va_tbl);
77 tfp_free(tp->pg_pa_tbl);
82 * Function to free an EM table
85 * Pointer to the EM table to free
/* Frees the page table at every level of the EM table, then resets the
 * cached level-0 DMA address and the data-page count to zero.
 */
88 tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
90 struct hcapi_cfa_em_page_tbl *tp;
93 for (i = 0; i < tbl->num_lvl; i++) {
96 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
101 tf_em_free_pg_tbl(tp);
105 tbl->l0_dma_addr = 0;
107 tbl->num_data_pages = 0;
111 * Allocation of page tables
114 * Pointer to a TruFlow handle
117 * Page count to allocate
124 * -ENOMEM - Out of memory
/* Allocates the VA and PA pointer arrays first, then pg_count pages of
 * pg_size bytes each with TF_EM_PAGE_ALIGNMENT alignment.  On failure
 * everything allocated so far is released via tf_em_free_pg_tbl().
 */
127 tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
132 struct tfp_calloc_parms parms;
134 parms.nitems = pg_count;
135 parms.size = sizeof(void *);
138 if (tfp_calloc(&parms) != 0)
141 tp->pg_va_tbl = parms.mem_va;
143 if (tfp_calloc(&parms) != 0) {
/* PA-array allocation failed: release the VA array before bailing. */
144 tfp_free(tp->pg_va_tbl);
148 tp->pg_pa_tbl = parms.mem_va;
151 tp->pg_size = pg_size;
153 for (i = 0; i < pg_count; i++) {
155 parms.size = pg_size;
156 parms.alignment = TF_EM_PAGE_ALIGNMENT;
158 if (tfp_calloc(&parms) != 0)
/* Record both the physical and virtual address of each page. */
161 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
162 tp->pg_va_tbl[i] = parms.mem_va;
/* Zero the page so stale host memory never reaches the device. */
164 memset(tp->pg_va_tbl[i], 0, pg_size);
/* Error path: tear down the partially-built table. */
171 tf_em_free_pg_tbl(tp);
176 * Allocates EM page tables
179 * Table to allocate pages for
183 * -ENOMEM - Out of memory
/* Allocates a page table for each level of the EM table using the
 * per-level page counts sized earlier; any failure unwinds all levels.
 */
186 tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
188 struct hcapi_cfa_em_page_tbl *tp;
193 for (i = 0; i < tbl->num_lvl; i++) {
194 tp = &tbl->pg_tbl[i];
196 rc = tf_em_alloc_pg_tbl(tp,
201 "Failed to allocate page table: lvl: %d, rc:%s\n",
/* Debug trace of the VA/PA pair for every allocated page. */
207 for (j = 0; j < tp->pg_count; j++) {
209 "EEM: Allocated page table: size %u lvl %d cnt"
214 (uint32_t *)tp->pg_va_tbl[j],
215 (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
/* Error path: free whatever levels were already allocated. */
221 tf_em_free_page_table(tbl);
226 * Links EM page tables
229 * Pointer to page table
232 * Pointer to the next page table
235 * Flag controlling if the page table is last
/* Fills tp's pages with PTEs pointing at tp_next's pages.  Each PTE is
 * the next level's physical address OR'd with PTU_PTE_VALID; when
 * set_pte_last is set, the second-to-last and last entries additionally
 * carry PTU_PTE_NEXT_TO_LAST / PTU_PTE_LAST so hardware can detect the
 * end of the chain.  k indexes tp_next's pages across all of tp's pages.
 */
238 tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
239 struct hcapi_cfa_em_page_tbl *tp_next,
242 uint64_t *pg_pa = tp_next->pg_pa_tbl;
249 for (i = 0; i < tp->pg_count; i++) {
250 pg_va = tp->pg_va_tbl[i];
252 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
253 if (k == tp_next->pg_count - 2 && set_pte_last)
254 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
255 else if (k == tp_next->pg_count - 1 && set_pte_last)
256 valid = PTU_PTE_LAST | PTU_PTE_VALID;
258 valid = PTU_PTE_VALID;
/* PTEs are stored little-endian for the device. */
260 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
261 if (++k >= tp_next->pg_count)
268 * Setup a EM page table
271 * Pointer to EM page table
/* Chains each page-table level to the one below it; only the link into
 * the final (deepest) level writes the LAST/NEXT_TO_LAST terminators.
 * Finally caches the level-0 virtual and DMA addresses on the table.
 */
274 tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
276 struct hcapi_cfa_em_page_tbl *tp_next;
277 struct hcapi_cfa_em_page_tbl *tp;
278 bool set_pte_last = 0;
281 for (i = 0; i < tbl->num_lvl - 1; i++) {
282 tp = &tbl->pg_tbl[i];
283 tp_next = &tbl->pg_tbl[i + 1];
/* The second-to-last level links into the data level: terminate. */
284 if (i == tbl->num_lvl - 2)
286 tf_em_link_page_table(tp, tp_next, set_pte_last);
289 tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
290 tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
294 * Given the page size, size of each data item (entry size),
295 * and the total number of entries needed, determine the number
296 * of page table levels and the number of data pages required.
305 * Number of entries needed
307 * [out] num_data_pages
308 * Number of pages required
311 * Success - Number of EM page levels required
312 * -ENOMEM - Out of memory
315 tf_em_size_page_tbl_lvl(uint32_t page_size,
317 uint32_t num_entries,
318 uint64_t *num_data_pages)
320 uint64_t lvl_data_size = page_size;
321 int lvl = TF_PT_LVL_0;
/* Total bytes of data the table must hold. */
325 data_size = (uint64_t)num_entries * entry_size;
/* Add page-table levels until one level's reach covers data_size:
 * level 1 addresses MAX_PAGE_PTRS pages, level 2 addresses
 * MAX_PAGE_PTRS^2 pages.
 */
327 while (lvl_data_size < data_size) {
330 if (lvl == TF_PT_LVL_1)
331 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
333 else if (lvl == TF_PT_LVL_2)
334 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
335 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = data size rounded up to whole pages. */
340 *num_data_pages = roundup(data_size, page_size) / page_size;
346 * Return the number of page table pages needed to
347 * reference the given number of next level pages.
353 * Size of each EM page
356 * Number of EM page table pages
/* A page-table page holds MAX_PAGE_PTRS(page_size) pointers, so round
 * num_pages up to that granularity and divide.
 */
359 tf_em_page_tbl_pgcnt(uint32_t num_pages,
362 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
363 MAX_PAGE_PTRS(page_size);
368 * Given the number of data pages, page_size and the maximum
369 * number of page table levels (already determined), size
370 * the number of page table pages required at each level.
373 * Max number of levels
375 * [in] num_data_pages
376 * Number of EM data pages
385 tf_em_size_page_tbls(int max_lvl,
386 uint64_t num_data_pages,
/* The data pages occupy the deepest level; every shallower level holds
 * exactly enough pointer pages to reference the level below it.
 */
390 if (max_lvl == TF_PT_LVL_0) {
391 page_cnt[TF_PT_LVL_0] = num_data_pages;
392 } else if (max_lvl == TF_PT_LVL_1) {
393 page_cnt[TF_PT_LVL_1] = num_data_pages;
394 page_cnt[TF_PT_LVL_0] =
395 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
396 } else if (max_lvl == TF_PT_LVL_2) {
397 page_cnt[TF_PT_LVL_2] = num_data_pages;
398 page_cnt[TF_PT_LVL_1] =
399 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
400 page_cnt[TF_PT_LVL_0] =
401 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
408 * Size the EM table based on capabilities
415 * - EINVAL - Parameter error
416 * - ENOMEM - Out of memory
419 tf_em_size_table(struct hcapi_cfa_em_table *tbl)
421 uint64_t num_data_pages;
424 uint32_t num_entries;
425 uint32_t cnt = TF_EM_MIN_ENTRIES;
427 /* Ignore entry if both size and number are zero */
428 if (!tbl->entry_size && !tbl->num_entries)
431 /* If only one is set then error */
432 if (!tbl->entry_size || !tbl->num_entries)
435 /* Determine number of page table levels and the number
436 * of data pages needed to process the given eem table.
438 if (tbl->type == TF_RECORD_TABLE) {
440 * For action records just a memory size is provided. Work
441 * backwards to resolve to number of entries
443 num_entries = tbl->num_entries / tbl->entry_size;
/* Clamp to the minimum, then round up to the next supported
 * power-of-two entry count by doubling cnt.
 */
444 if (num_entries < TF_EM_MIN_ENTRIES) {
445 num_entries = TF_EM_MIN_ENTRIES;
447 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
452 num_entries = tbl->num_entries;
455 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
460 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
462 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
463 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is the highest level index; levels = index + 1. */
468 tbl->num_lvl = max_lvl + 1;
469 tbl->num_data_pages = num_data_pages;
471 /* Determine the number of pages needed at each level */
472 page_cnt = tbl->page_cnt;
473 memset(page_cnt, 0, sizeof(tbl->page_cnt));
474 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
477 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
479 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
481 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
483 page_cnt[TF_PT_LVL_0],
484 page_cnt[TF_PT_LVL_1],
485 page_cnt[TF_PT_LVL_2]);
491 * Unregisters EM Ctx in Firmware
494 * Pointer to a TruFlow handle
497 * Pointer to a table scope control block
500 * Receive or transmit direction
/* For every populated EM table in the given direction: unregister its
 * memory context with firmware, then free the host page tables.
 */
503 tf_em_ctx_unreg(struct tf *tfp,
504 struct tf_tbl_scope_cb *tbl_scope_cb,
507 struct hcapi_cfa_em_ctx_mem_info *ctxp =
508 &tbl_scope_cb->em_ctx_info[dir];
509 struct hcapi_cfa_em_table *tbl;
512 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
513 tbl = &ctxp->em_tables[i];
/* Only tables that were actually sized/allocated are torn down. */
515 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
516 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
517 tf_em_free_page_table(tbl);
523 * Registers EM Ctx in Firmware
526 * Pointer to a TruFlow handle
529 * Pointer to a table scope control block
532 * Receive or transmit direction
536 * -ENOMEM - Out of Memory
/* For each populated EM table in the given direction: size it, allocate
 * and link its page tables, then register the memory with firmware.
 * Any failure unwinds all prior registrations via tf_em_ctx_unreg().
 */
539 tf_em_ctx_reg(struct tf *tfp,
540 struct tf_tbl_scope_cb *tbl_scope_cb,
543 struct hcapi_cfa_em_ctx_mem_info *ctxp =
544 &tbl_scope_cb->em_ctx_info[dir];
545 struct hcapi_cfa_em_table *tbl;
549 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
550 tbl = &ctxp->em_tables[i];
552 if (tbl->num_entries && tbl->entry_size) {
553 rc = tf_em_size_table(tbl);
558 rc = tf_em_alloc_page_table(tbl);
562 tf_em_setup_page_table(tbl);
563 rc = tf_msg_em_mem_rgtr(tfp,
565 TF_EM_PAGE_SIZE_ENUM,
/* Error path: unregister/free everything done so far. */
575 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
580 * Validates EM number of entries requested
583 * Pointer to table scope control block to be populated
586 * Pointer to input parameters
590 * -EINVAL - Parameter error
/* Validates the Rx and Tx flow-count/memory-size requests against the
 * EEM min/max entry limits and the queried device capabilities, then
 * populates the per-direction KEY0/KEY1/RECORD/EFC table sizes in the
 * table scope control block.  When a *_mem_size_in_mb is supplied the
 * flow count is derived from the memory size; otherwise the requested
 * *_num_flows_in_k must already be a supported power of two.
 */
593 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
594 struct tf_alloc_tbl_scope_parms *parms)
598 if (parms->rx_mem_size_in_mb != 0) {
/* Each flow consumes two key entries plus one action record. */
599 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
600 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
602 uint32_t num_entries = (parms->rx_mem_size_in_mb *
603 TF_MEGABYTE) / (key_b + action_b);
605 if (num_entries < TF_EM_MIN_ENTRIES) {
606 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
608 parms->rx_mem_size_in_mb);
/* Round up to the next supported power-of-two entry count. */
612 cnt = TF_EM_MIN_ENTRIES;
613 while (num_entries > cnt &&
614 cnt <= TF_EM_MAX_ENTRIES)
617 if (cnt > TF_EM_MAX_ENTRIES) {
/* Fixed copy-paste: this is the Rx branch, so report the Rx
 * request, not the Tx one.
 */
618 TFP_DRV_LOG(ERR, "EEM: Invalid number of Rx requested: "
620 num_entries);
624 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
626 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
628 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
629 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
631 "EEM: Invalid number of Rx flows "
632 "requested:%u max:%u\n",
633 parms->rx_num_flows_in_k * TF_KILOBYTE,
634 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
638 /* must be a power-of-2 supported value
639 * in the range 32K - 128M
641 cnt = TF_EM_MIN_ENTRIES;
642 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
643 cnt <= TF_EM_MAX_ENTRIES)
646 if (cnt > TF_EM_MAX_ENTRIES) {
648 "EEM: Invalid number of Rx requested: %u\n",
649 (parms->rx_num_flows_in_k * TF_KILOBYTE));
654 if (parms->tx_mem_size_in_mb != 0) {
655 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
656 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
658 uint32_t num_entries = (parms->tx_mem_size_in_mb *
659 (TF_KILOBYTE * TF_KILOBYTE)) /
662 if (num_entries < TF_EM_MIN_ENTRIES) {
664 "EEM: Insufficient memory requested:%uMB\n",
/* Fixed copy-paste: report the Tx memory size that was
 * actually tested, not the Rx one.
 */
665 parms->tx_mem_size_in_mb);
669 cnt = TF_EM_MIN_ENTRIES;
670 while (num_entries > cnt &&
671 cnt <= TF_EM_MAX_ENTRIES)
674 if (cnt > TF_EM_MAX_ENTRIES) {
676 "EEM: Invalid number of Tx requested: %u\n",
677 (parms->tx_num_flows_in_k * TF_KILOBYTE));
681 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
683 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
685 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
686 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
688 "EEM: Invalid number of Tx flows "
689 "requested:%u max:%u\n",
690 (parms->tx_num_flows_in_k * TF_KILOBYTE),
691 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
695 cnt = TF_EM_MIN_ENTRIES;
696 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
697 cnt <= TF_EM_MAX_ENTRIES)
700 if (cnt > TF_EM_MAX_ENTRIES) {
702 "EEM: Invalid number of Tx requested: %u\n",
703 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count with a zero-byte key is unusable. */
708 if (parms->rx_num_flows_in_k != 0 &&
709 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
711 "EEM: Rx key size required: %u\n",
712 (parms->rx_max_key_sz_in_bits));
716 if (parms->tx_num_flows_in_k != 0 &&
717 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
719 "EEM: Tx key size required: %u\n",
720 (parms->tx_max_key_sz_in_bits));
/* Populate the per-direction table geometry used by ctx registration. */
724 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
725 parms->rx_num_flows_in_k * TF_KILOBYTE;
726 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
727 parms->rx_max_key_sz_in_bits / 8;
729 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
730 parms->rx_num_flows_in_k * TF_KILOBYTE;
731 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
732 parms->rx_max_key_sz_in_bits / 8;
734 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
735 parms->rx_num_flows_in_k * TF_KILOBYTE;
736 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
737 parms->rx_max_action_entry_sz_in_bits / 8;
739 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
743 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
744 parms->tx_num_flows_in_k * TF_KILOBYTE;
745 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
746 parms->tx_max_key_sz_in_bits / 8;
748 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
749 parms->tx_num_flows_in_k * TF_KILOBYTE;
750 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
751 parms->tx_max_key_sz_in_bits / 8;
753 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
754 parms->tx_num_flows_in_k * TF_KILOBYTE;
755 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
756 parms->tx_max_action_entry_sz_in_bits / 8;
758 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
765 * Internal function to get a Table Entry. Supports all Table Types
766 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
769 * Pointer to TruFlow handle
772 * Pointer to input parameters
776 * -EINVAL - Parameter error
779 tf_bulk_get_tbl_entry_internal(struct tf *tfp,
780 struct tf_bulk_get_tbl_entry_parms *parms)
785 struct bitalloc *session_pool;
786 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
788 /* Lookup the pool using the table type of the element */
789 rc = tf_rm_lookup_tbl_type_pool(tfs,
793 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
797 index = parms->starting_idx;
800 * Adjust the returned index/offset as there is no guarantee
801 * that the start is 0 at time of RM allocation
803 tf_rm_convert_index(tfs,
806 TF_RM_CONVERT_RM_BASE,
810 /* Verify that the entry has been previously allocated */
811 id = ba_inuse(session_pool, index);
814 "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
815 tf_dir_2_str(parms->dir),
/* Entry is valid; ask firmware for the bulk of table entries. */
822 rc = tf_msg_bulk_get_tbl_entry(tfp, parms);
825 "%s, Bulk get failed, type:%d, rc:%s\n",
826 tf_dir_2_str(parms->dir),
836 * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
837 * the requested entry. If found the ref count is incremente and
843 * Allocation parameters
846 * 0 - Success, entry found and ref count incremented
847 * -ENOENT - Failure, entry not found
/* Stub: shadow-DB search on allocate is not implemented; always logs
 * and fails.
 */
850 tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
851 struct tf_alloc_tbl_entry_parms *parms __rte_unused)
854 "%s, Entry Alloc with search not supported\n",
855 tf_dir_2_str(parms->dir));
861 * Free Tbl entry from the Shadow DB. Shadow DB is searched for
862 * the requested entry. If found the ref count is decremente and
863 * new ref_count returned.
868 * Allocation parameters
871 * 0 - Success, entry found and ref count decremented
872 * -ENOENT - Failure, entry not found
/* Stub: shadow-DB search on free is not implemented; always logs and
 * fails.
 */
875 tf_free_tbl_entry_shadow(struct tf_session *tfs,
876 struct tf_free_tbl_entry_parms *parms)
879 "%s, Entry Free with search not supported\n",
880 tf_dir_2_str(parms->dir));
884 #endif /* TF_SHADOW */
887 * Create External Tbl pool of memory indexes.
892 * pointer to the table scope
894 * number of entries to write
895 * [in] entry_sz_bytes
899 * 0 - Success, entry allocated - no search support
900 * -ENOMEM -EINVAL -EOPNOTSUPP
901 * - Failure, entry not allocated, out of resources
/* Builds a stack-based free pool of external-memory byte offsets for
 * one direction of a table scope.  The backing allocation is recorded
 * on the control block so it can be freed with the scope.
 */
904 tf_create_tbl_pool_external(enum tf_dir dir,
905 struct tf_tbl_scope_cb *tbl_scope_cb,
906 uint32_t num_entries,
907 uint32_t entry_sz_bytes)
909 struct tfp_calloc_parms parms;
913 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
915 parms.nitems = num_entries;
916 parms.size = sizeof(uint32_t);
919 if (tfp_calloc(&parms) != 0) {
920 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
921 tf_dir_2_str(dir), strerror(ENOMEM));
925 /* Create empty stack
927 rc = stack_init(num_entries, parms.mem_va, pool);
930 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
931 tf_dir_2_str(dir), strerror(-rc));
935 /* Save the malloced memory address so that it can
936 * be freed when the table scope is freed.
938 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
940 /* Fill pool with indexes in reverse
/* Offsets are pushed highest-first so pops hand them out ascending. */
942 j = (num_entries - 1) * entry_sz_bytes;
944 for (i = 0; i < num_entries; i++) {
945 rc = stack_push(pool, j);
947 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
948 tf_dir_2_str(dir), strerror(-rc));
953 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
/* Sanity check: every offset must have been pushed. */
960 if (!stack_is_full(pool)) {
962 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
963 tf_dir_2_str(dir), strerror(-rc));
/* Error path: release the backing allocation. */
968 tfp_free((void *)parms.mem_va);
973 * Destroy External Tbl pool of memory indexes.
978 * pointer to the table scope
/* Frees the memory backing the external action pool for one direction. */
982 tf_destroy_tbl_pool_external(enum tf_dir dir,
983 struct tf_tbl_scope_cb *tbl_scope_cb)
985 uint32_t *ext_act_pool_mem =
986 tbl_scope_cb->ext_act_pool_mem[dir];
988 tfp_free(ext_act_pool_mem);
991 /* API defined in tf_em.h */
/* Looks up the table scope control block for tbl_scope_id.  The id is
 * first validated against the session's scope allocation pool, then the
 * scope array is scanned for a matching id.
 */
992 struct tf_tbl_scope_cb *
993 tbl_scope_cb_find(struct tf_session *session,
994 uint32_t tbl_scope_id)
998 /* Check that id is valid */
999 i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
1003 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
1004 if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
1005 return &session->tbl_scopes[i];
1011 /* API defined in tf_core.h */
/* Frees an EEM table scope: returns the control block to the session
 * pool, then per direction destroys the external pools and unregisters
 * all EM memory contexts with firmware.
 */
1013 tf_free_eem_tbl_scope_cb(struct tf *tfp,
1014 struct tf_free_tbl_scope_parms *parms)
1018 struct tf_tbl_scope_cb *tbl_scope_cb;
1019 struct tf_session *session;
1021 session = (struct tf_session *)(tfp->session->core_data);
1023 tbl_scope_cb = tbl_scope_cb_find(session,
1024 parms->tbl_scope_id);
1026 if (tbl_scope_cb == NULL) {
1027 TFP_DRV_LOG(ERR, "Table scope error\n");
1031 /* Free Table control block */
1032 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1034 /* free table scope locks */
1035 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1036 /* Free associated external pools
1038 tf_destroy_tbl_pool_external(dir,
1042 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1044 /* free table scope and all associated resources */
1045 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
1051 /* API defined in tf_em.h */
/* Allocates an EEM table scope: reserves a control block, queries EEM
 * capabilities per direction, validates/derives the table sizes, then
 * per direction registers memory contexts, configures and enables EEM
 * in firmware, and creates the external action-record offset pool.
 * On failure, already-acquired resources are released.
 */
1053 tf_alloc_eem_tbl_scope(struct tf *tfp,
1054 struct tf_alloc_tbl_scope_parms *parms)
1058 struct tf_tbl_scope_cb *tbl_scope_cb;
1059 struct hcapi_cfa_em_table *em_tables;
1061 struct tf_session *session;
1062 struct tf_free_tbl_scope_parms free_parms;
1064 session = (struct tf_session *)tfp->session->core_data;
1066 /* Get Table Scope control block from the session pool */
1067 index = ba_alloc(session->tbl_scope_pool_rx);
1069 TFP_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
1074 tbl_scope_cb = &session->tbl_scopes[index];
1075 tbl_scope_cb->index = index;
1076 tbl_scope_cb->tbl_scope_id = index;
/* The pool index doubles as the scope id returned to the caller. */
1077 parms->tbl_scope_id = index;
1079 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1080 rc = tf_msg_em_qcaps(tfp,
1082 &tbl_scope_cb->em_caps[dir]);
1085 "EEM: Unable to query for EEM capability,"
1093 * Validate and setup table sizes
1095 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1098 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1100 * Allocate tables and signal configuration to FW
1102 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1105 "EEM: Unable to register for EEM ctx,"
1111 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
1112 rc = tf_msg_em_cfg(tfp,
1113 em_tables[TF_KEY0_TABLE].num_entries,
1114 em_tables[TF_KEY0_TABLE].ctx_id,
1115 em_tables[TF_KEY1_TABLE].ctx_id,
1116 em_tables[TF_RECORD_TABLE].ctx_id,
1117 em_tables[TF_EFC_TABLE].ctx_id,
1118 parms->hw_flow_cache_flush_timer,
1122 "TBL: Unable to configure EEM in firmware"
1128 rc = tf_msg_em_op(tfp,
1130 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1134 "EEM: Unable to enable EEM in firmware"
1140 /* Allocate the pool of offsets of the external memory.
1141 * Initially, this is a single fixed size pool for all external
1142 * actions related to a single table scope.
1144 rc = tf_create_tbl_pool_external(dir,
1146 em_tables[TF_RECORD_TABLE].num_entries,
1147 em_tables[TF_RECORD_TABLE].entry_size)
1150 "%s TBL: Unable to allocate idx pools %s\n",
/* Cleanup: free the whole scope (contexts, pools, firmware state). */
1160 free_parms.tbl_scope_id = index;
1161 tf_free_eem_tbl_scope_cb(tfp, &free_parms);
1165 /* Free Table control block */
1166 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1170 /* API defined in tf_core.h */
/* Public bulk-get entry point: validates the session/parms, rejects the
 * external table type, and delegates to the internal implementation.
 */
1172 tf_bulk_get_tbl_entry(struct tf *tfp,
1173 struct tf_bulk_get_tbl_entry_parms *parms)
1177 TF_CHECK_PARMS_SESSION(tfp, parms);
1179 if (parms->type == TF_TBL_TYPE_EXT) {
1180 /* Not supported, yet */
1182 "%s, External table type not supported\n",
1183 tf_dir_2_str(parms->dir));
1187 /* Internal table type processing */
1188 rc = tf_bulk_get_tbl_entry_internal(tfp, parms);
1191 "%s, Bulk get failed, type:%d, rc:%s\n",
1192 tf_dir_2_str(parms->dir),
1200 /* API defined in tf_core.h */
/* Public wrapper: validates the session/parms and allocates an EEM
 * table scope.
 */
1202 tf_alloc_tbl_scope(struct tf *tfp,
1203 struct tf_alloc_tbl_scope_parms *parms)
1207 TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
1209 rc = tf_alloc_eem_tbl_scope(tfp, parms);
1214 /* API defined in tf_core.h */
/* Public wrapper: validates the session/parms and frees the table scope
 * together with all associated resources.
 */
1216 tf_free_tbl_scope(struct tf *tfp,
1217 struct tf_free_tbl_scope_parms *parms)
1221 TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
1223 /* free table scope and all associated resources */
1224 rc = tf_free_eem_tbl_scope_cb(tfp, parms);
/* Debug helper: prints every PTE in tp's pages, flagging invalid
 * entries, and stops when the LAST marker or the end of tp_next's page
 * count is reached.
 */
1230 tf_dump_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
1231 struct hcapi_cfa_em_page_tbl *tp_next)
1238 printf("pg_count:%d pg_size:0x%x\n",
1241 for (i = 0; i < tp->pg_count; i++) {
1242 pg_va = tp->pg_va_tbl[i];
1243 printf("\t%p\n", (void *)pg_va);
1244 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
1245 printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
/* Low 3 bits carry the PTU_PTE_* flags written at link time. */
1246 if (((pg_va[j] & 0x7) ==
1247 tfp_cpu_to_le_64(PTU_PTE_LAST |
1251 if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
1252 printf("** Invalid entry **\n");
1256 if (++k >= tp_next->pg_count) {
1257 printf("** Shouldn't get here **\n");
1264 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
1266 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
1268 struct tf_session *session;
1269 struct tf_tbl_scope_cb *tbl_scope_cb;
1270 struct hcapi_cfa_em_page_tbl *tp;
1271 struct hcapi_cfa_em_page_tbl *tp_next;
1272 struct hcapi_cfa_em_table *tbl;
1277 printf("called %s\n", __func__);
1279 /* find session struct */
1280 session = (struct tf_session *)tfp->session->core_data;
1282 /* find control block for table scope */
1283 tbl_scope_cb = tbl_scope_cb_find(session,
1285 if (tbl_scope_cb == NULL)
1286 PMD_DRV_LOG(ERR, "No table scope\n");
1288 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1289 printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
1291 for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
1292 tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
1294 ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
1300 if (tbl->pg_tbl[0].pg_va_tbl &&
1301 tbl->pg_tbl[0].pg_pa_tbl)
1303 tbl->pg_tbl[0].pg_va_tbl[0],
1304 (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
1305 for (i = 0; i < tbl->num_lvl - 1; i++) {
1306 printf("Level:%d\n", i);
1307 tp = &tbl->pg_tbl[i];
1308 tp_next = &tbl->pg_tbl[i + 1];
1309 tf_dump_link_page_table(tp, tp_next);