1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
6 /* Truflow Table APIs and supporting code */
12 #include <sys/param.h>
13 #include <rte_common.h>
14 #include <rte_errno.h>
15 #include "hsi_struct_def_dpdk.h"
23 #include "tf_resources.h"
26 #define PTU_PTE_VALID 0x1UL
27 #define PTU_PTE_LAST 0x2UL
28 #define PTU_PTE_NEXT_TO_LAST 0x4UL
30 /* Number of pointers per page_size */
31 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
33 #define TF_EM_PG_SZ_4K (1 << 12)
34 #define TF_EM_PG_SZ_8K (1 << 13)
35 #define TF_EM_PG_SZ_64K (1 << 16)
36 #define TF_EM_PG_SZ_256K (1 << 18)
37 #define TF_EM_PG_SZ_1M (1 << 20)
38 #define TF_EM_PG_SZ_2M (1 << 21)
39 #define TF_EM_PG_SZ_4M (1 << 22)
40 #define TF_EM_PG_SZ_1G (1 << 30)
42 #define TF_EM_CTX_ID_INVALID 0xFFFF
44 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
45 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
48 * Function to free a page table
51 * Pointer to the page table to free
/*
 * Free one EM page-table level: release every allocated data page
 * (logging any slot with no VA mapping), then the VA and PA pointer
 * arrays themselves.  (Lines are elided in this extract.)
 */
54 tf_em_free_pg_tbl(struct tf_em_page_tbl *tp)
58 for (i = 0; i < tp->pg_count; i++) {
59 if (!tp->pg_va_tbl[i]) {
61 "No map for page %d table %016" PRIu64 "\n",
63 (uint64_t)(uintptr_t)tp);
67 tfp_free(tp->pg_va_tbl[i]);
/* Clear the slot so a double-free attempt is harmless. */
68 tp->pg_va_tbl[i] = NULL;
/* Release the pointer arrays after the pages they track. */
72 tfp_free(tp->pg_va_tbl);
74 tfp_free(tp->pg_pa_tbl);
79 * Function to free an EM table
82 * Pointer to the EM table to free
/*
 * Free an entire EM table: walk each page-table level, free it via
 * tf_em_free_pg_tbl(), then reset the level-0 DMA address and the
 * data-page count bookkeeping.  (Lines are elided in this extract.)
 */
85 tf_em_free_page_table(struct tf_em_table *tbl)
87 struct tf_em_page_tbl *tp;
90 for (i = 0; i < tbl->num_lvl; i++) {
94 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
99 tf_em_free_pg_tbl(tp);
/* Table no longer owns memory; scrub the descriptors. */
103 tbl->l0_dma_addr = 0;
105 tbl->num_data_pages = 0;
109 * Allocation of page tables
112 * Pointer to a TruFlow handle
115 * Page count to allocate
122 * -ENOMEM - Out of memory
/*
 * Allocate one EM page-table level: a VA pointer array, a PA pointer
 * array, then pg_count data pages each aligned to
 * TF_EM_PAGE_ALIGNMENT and zeroed.  On any allocation failure the
 * partially built level is torn down via tf_em_free_pg_tbl().
 * Returns 0 on success, -ENOMEM on failure (per the elided doc
 * fragment above).  (Lines are elided in this extract.)
 */
125 tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,
130 struct tfp_calloc_parms parms;
/* First calloc: the array of page virtual addresses. */
132 parms.nitems = pg_count;
133 parms.size = sizeof(void *);
136 if (tfp_calloc(&parms) != 0)
139 tp->pg_va_tbl = parms.mem_va;
/* Second calloc: the matching physical-address array; undo the
 * VA array if it fails.
 */
141 if (tfp_calloc(&parms) != 0) {
142 tfp_free(tp->pg_va_tbl);
146 tp->pg_pa_tbl = parms.mem_va;
149 tp->pg_size = pg_size;
/* Allocate each aligned data page and record both addresses. */
151 for (i = 0; i < pg_count; i++) {
153 parms.size = pg_size;
154 parms.alignment = TF_EM_PAGE_ALIGNMENT;
156 if (tfp_calloc(&parms) != 0)
159 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
160 tp->pg_va_tbl[i] = parms.mem_va;
162 memset(tp->pg_va_tbl[i], 0, pg_size);
/* Error path: free whatever was built so far. */
169 tf_em_free_pg_tbl(tp);
174 * Allocates EM page tables
177 * Table to allocate pages for
181 * -ENOMEM - Out of memory
/*
 * Allocate the page tables for every level of an EM table, logging
 * each page allocated; on a per-level failure, free the whole table
 * via tf_em_free_page_table().  (Lines are elided in this extract.)
 */
184 tf_em_alloc_page_table(struct tf_em_table *tbl)
186 struct tf_em_page_tbl *tp;
191 for (i = 0; i < tbl->num_lvl; i++) {
192 tp = &tbl->pg_tbl[i];
/* Per-level allocation; elided args presumably carry the page
 * count/size for this level - TODO confirm against full source.
 */
194 rc = tf_em_alloc_pg_tbl(tp,
199 "Failed to allocate page table: lvl: %d\n",
/* Debug dump of every page's VA/PA pair. */
204 for (j = 0; j < tp->pg_count; j++) {
206 "EEM: Allocated page table: size %u lvl %d cnt"
211 (uint32_t *)tp->pg_va_tbl[j],
212 (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
/* Cleanup path: release all levels allocated so far. */
218 tf_em_free_page_table(tbl);
223 * Links EM page tables
226 * Pointer to page table
229 * Pointer to the next page table
232 * Flag controlling if the page table is last
/*
 * Link one page-table level to the next by writing little-endian PTEs
 * (next-level physical address OR'd with validity flags) into each
 * page of this level.  When set_pte_last is set, the last two
 * next-level pages are tagged PTU_PTE_NEXT_TO_LAST / PTU_PTE_LAST so
 * hardware can detect the end of the chain.  (Lines are elided in
 * this extract.)
 */
235 tf_em_link_page_table(struct tf_em_page_tbl *tp,
236 struct tf_em_page_tbl *tp_next,
239 uint64_t *pg_pa = tp_next->pg_pa_tbl;
246 for (i = 0; i < tp->pg_count; i++) {
247 pg_va = tp->pg_va_tbl[i];
/* One PTE per pointer slot in this page. */
249 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
250 if (k == tp_next->pg_count - 2 && set_pte_last)
251 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
252 else if (k == tp_next->pg_count - 1 && set_pte_last)
253 valid = PTU_PTE_LAST | PTU_PTE_VALID;
255 valid = PTU_PTE_VALID;
/* PTEs are stored little-endian for the PTU hardware. */
257 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
/* Stop once every next-level page has been linked. */
258 if (++k >= tp_next->pg_count)
265 * Setup a EM page table
268 * Pointer to EM page table
/*
 * Wire up a multi-level EM page table: link each level to the next
 * (requesting last-PTE marking on the final link, set at the elided
 * line after the num_lvl - 2 test), then publish the level-0 VA and
 * DMA address for firmware registration.  (Lines are elided in this
 * extract.)
 */
271 tf_em_setup_page_table(struct tf_em_table *tbl)
273 struct tf_em_page_tbl *tp_next;
274 struct tf_em_page_tbl *tp;
275 bool set_pte_last = 0;
278 for (i = 0; i < tbl->num_lvl - 1; i++) {
279 tp = &tbl->pg_tbl[i];
280 tp_next = &tbl->pg_tbl[i + 1];
/* Last link in the chain gets LAST/NEXT_TO_LAST PTE flags. */
281 if (i == tbl->num_lvl - 2)
283 tf_em_link_page_table(tp, tp_next, set_pte_last);
/* Root (level 0) addresses handed to firmware later. */
286 tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];
287 tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];
291 * Given the page size, size of each data item (entry size),
292 * and the total number of entries needed, determine the number
293 * of page table levels and the number of data pages required.
302 * Number of entries needed
304 * [out] num_data_pages
305 * Number of pages required
308 * Success - Number of EM page levels required
309 * -ENOMEM - Out of memory
/*
 * Compute how many page-table levels are needed to cover
 * num_entries * entry_size bytes with the given page size, and the
 * number of data pages required (written through num_data_pages).
 * Per the elided doc fragment, returns the max level index on
 * success or -ENOMEM when even three levels cannot cover the data.
 * (Lines are elided in this extract.)
 */
312 tf_em_size_page_tbl_lvl(uint32_t page_size,
314 uint32_t num_entries,
315 uint64_t *num_data_pages)
/* Level 0 alone covers one page of data. */
317 uint64_t lvl_data_size = page_size;
322 data_size = (uint64_t)num_entries * entry_size;
/* Grow coverage level by level until data_size fits. */
324 while (lvl_data_size < data_size) {
328 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
330 else if (lvl == PT_LVL_2)
/* Two indirection levels: PTRS^2 data pages. */
331 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
332 MAX_PAGE_PTRS(page_size) * page_size;
337 *num_data_pages = roundup(data_size, page_size) / page_size;
343 * Return the number of page table pages needed to
344 * reference the given number of next level pages.
350 * Size of each EM page
353 * Number of EM page table pages
/*
 * Number of page-table pages needed to hold pointers to num_pages
 * next-level pages: ceil(num_pages / pointers-per-page).
 */
356 tf_em_page_tbl_pgcnt(uint32_t num_pages,
359 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
360 MAX_PAGE_PTRS(page_size);
365 * Given the number of data pages, page_size and the maximum
366 * number of page table levels (already determined), size
367 * the number of page table pages required at each level.
370 * Max number of levels
372 * [in] num_data_pages
373 * Number of EM data pages
/*
 * Given the deepest level and the number of data pages, fill in the
 * per-level page counts: the deepest level holds the data pages and
 * each shallower level holds enough pages to point at the level
 * below (via tf_em_page_tbl_pgcnt).  The elided lines presumably
 * assign the shallower-level entries of page_cnt[].
 * (Lines are elided in this extract.)
 */
382 tf_em_size_page_tbls(int max_lvl,
383 uint64_t num_data_pages,
387 if (max_lvl == PT_LVL_0) {
388 page_cnt[PT_LVL_0] = num_data_pages;
389 } else if (max_lvl == PT_LVL_1) {
390 page_cnt[PT_LVL_1] = num_data_pages;
392 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
393 } else if (max_lvl == PT_LVL_2) {
394 page_cnt[PT_LVL_2] = num_data_pages;
396 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);
398 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
405 * Size the EM table based on capabilities
412 * - EINVAL - Parameter error
413 * - ENOMEM - Out of memory
/*
 * Size an EM table: validate entry_size/num_entries, for RECORD_TABLE
 * derive a power-of-two entry count from the provided memory size,
 * then compute the number of page-table levels and per-level page
 * counts.  (Lines are elided in this extract.)
 */
416 tf_em_size_table(struct tf_em_table *tbl)
418 uint64_t num_data_pages;
421 uint32_t num_entries;
422 uint32_t cnt = TF_EM_MIN_ENTRIES;
424 /* Ignore entry if both size and number are zero */
425 if (!tbl->entry_size && !tbl->num_entries)
428 /* If only one is set then error */
429 if (!tbl->entry_size || !tbl->num_entries)
432 /* Determine number of page table levels and the number
433 * of data pages needed to process the given eem table.
435 if (tbl->type == RECORD_TABLE) {
437 * For action records just a memory size is provided. Work
438 * backwards to resolve to number of entries
/* NOTE(review): here num_entries appears to hold a byte count
 * being converted to an entry count - confirm against the full
 * source.
 */
440 num_entries = tbl->num_entries / tbl->entry_size;
441 if (num_entries < TF_EM_MIN_ENTRIES) {
442 num_entries = TF_EM_MIN_ENTRIES;
/* Round up to the next supported power-of-two count. */
444 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
449 num_entries = tbl->num_entries;
452 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
457 PMD_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
459 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
461 (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is the deepest level index; levels = index + 1. */
466 tbl->num_lvl = max_lvl + 1;
467 tbl->num_data_pages = num_data_pages;
469 /* Determine the number of pages needed at each level */
470 page_cnt = tbl->page_cnt;
471 memset(page_cnt, 0, sizeof(tbl->page_cnt));
472 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
475 PMD_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
477 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
479 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
489 * Unregisters EM Ctx in Firmware
492 * Pointer to a TruFlow handle
495 * Pointer to a table scope control block
498 * Receive or transmit direction
/*
 * Unregister EM context with firmware for one direction: for every
 * sized table, unregister its memory context and free its page
 * tables.  (Lines are elided in this extract.)
 */
501 tf_em_ctx_unreg(struct tf *tfp,
502 struct tf_tbl_scope_cb *tbl_scope_cb,
505 struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
506 struct tf_em_table *tbl;
509 for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
510 tbl = &ctxp->em_tables[i];
/* Only tables that were actually configured get torn down. */
512 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
513 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
514 tf_em_free_page_table(tbl);
520 * Registers EM Ctx in Firmware
523 * Pointer to a TruFlow handle
526 * Pointer to a table scope control block
529 * Receive or transmit direction
533 * -ENOMEM - Out of Memory
/*
 * Register EM context with firmware for one direction: size each
 * configured table, allocate and link its page tables, then register
 * the memory with firmware.  On any failure the elided error path
 * unwinds everything via tf_em_ctx_unreg().  (Lines are elided in
 * this extract.)
 */
536 tf_em_ctx_reg(struct tf *tfp,
537 struct tf_tbl_scope_cb *tbl_scope_cb,
540 struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
541 struct tf_em_table *tbl;
545 for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
546 tbl = &ctxp->em_tables[i];
548 if (tbl->num_entries && tbl->entry_size) {
549 rc = tf_em_size_table(tbl);
554 rc = tf_em_alloc_page_table(tbl);
558 tf_em_setup_page_table(tbl);
559 rc = tf_msg_em_mem_rgtr(tfp,
561 TF_EM_PAGE_SIZE_ENUM,
/* Cleanup path: undo any registrations already made. */
571 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
576 * Validates EM number of entries requested
579 * Pointer to table scope control block to be populated
582 * Pointer to input parameters
586 * -EINVAL - Parameter error
/*
 * Validate and normalize the requested EM entry counts for Rx and Tx.
 * When a memory size (MB) is given, derive a power-of-two flow count
 * from the key + action record footprint; then check the count
 * against firmware capability limits and require it to be a
 * supported power of two in [32K, 128M].  Finally populate
 * num_entries/entry_size for KEY0/KEY1/RECORD/EFC tables in both
 * directions.  Returns 0 on success, -EINVAL on parameter error (per
 * the elided doc fragment).  (Lines are elided in this extract.)
 */
589 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
590 struct tf_alloc_tbl_scope_parms *parms)
594 if (parms->rx_mem_size_in_mb != 0) {
/* Two key-table copies (KEY0/KEY1) per flow, hence 2x. */
595 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
596 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
598 uint32_t num_entries = (parms->rx_mem_size_in_mb *
599 TF_MEGABYTE) / (key_b + action_b);
601 if (num_entries < TF_EM_MIN_ENTRIES) {
602 PMD_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
604 parms->rx_mem_size_in_mb);
/* Round the derived count up to a supported power of two. */
608 cnt = TF_EM_MIN_ENTRIES;
609 while (num_entries > cnt &&
610 cnt <= TF_EM_MAX_ENTRIES)
613 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): this is the Rx sizing path but the message and
 * operand reference Tx - looks like a copy/paste defect; should
 * presumably log the Rx count.  Confirm against upstream fix.
 */
614 PMD_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
616 (parms->tx_num_flows_in_k * TF_KILOBYTE));
620 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Explicit flow count path: range-check against firmware caps. */
622 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
624 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
625 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
627 "EEM: Invalid number of Rx flows "
628 "requested:%u max:%u\n",
629 parms->rx_num_flows_in_k * TF_KILOBYTE,
630 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
634 /* must be a power-of-2 supported value
635 * in the range 32K - 128M
637 cnt = TF_EM_MIN_ENTRIES;
638 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
639 cnt <= TF_EM_MAX_ENTRIES)
642 if (cnt > TF_EM_MAX_ENTRIES) {
644 "EEM: Invalid number of Rx requested: %u\n",
645 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* Same derivation for Tx; note TF_KILOBYTE*TF_KILOBYTE is used
 * here where the Rx path used TF_MEGABYTE - presumably equal.
 */
650 if (parms->tx_mem_size_in_mb != 0) {
651 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
652 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
654 uint32_t num_entries = (parms->tx_mem_size_in_mb *
655 (TF_KILOBYTE * TF_KILOBYTE)) /
658 if (num_entries < TF_EM_MIN_ENTRIES) {
/* NOTE(review): Tx branch logs rx_mem_size_in_mb - presumably
 * should be tx_mem_size_in_mb; confirm against upstream fix.
 */
660 "EEM: Insufficient memory requested:%uMB\n",
661 parms->rx_mem_size_in_mb);
665 cnt = TF_EM_MIN_ENTRIES;
666 while (num_entries > cnt &&
667 cnt <= TF_EM_MAX_ENTRIES)
670 if (cnt > TF_EM_MAX_ENTRIES) {
672 "EEM: Invalid number of Tx requested: %u\n",
673 (parms->tx_num_flows_in_k * TF_KILOBYTE));
677 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
679 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
681 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
682 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
684 "EEM: Invalid number of Tx flows "
685 "requested:%u max:%u\n",
686 (parms->tx_num_flows_in_k * TF_KILOBYTE),
687 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
691 cnt = TF_EM_MIN_ENTRIES;
692 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
693 cnt <= TF_EM_MAX_ENTRIES)
696 if (cnt > TF_EM_MAX_ENTRIES) {
698 "EEM: Invalid number of Tx requested: %u\n",
699 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count requires a key of at least one byte. */
704 if (parms->rx_num_flows_in_k != 0 &&
705 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
707 "EEM: Rx key size required: %u\n",
708 (parms->rx_max_key_sz_in_bits));
712 if (parms->tx_num_flows_in_k != 0 &&
713 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
715 "EEM: Tx key size required: %u\n",
716 (parms->tx_max_key_sz_in_bits));
/* Commit validated sizes into the per-direction table configs. */
720 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =
721 parms->rx_num_flows_in_k * TF_KILOBYTE;
722 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =
723 parms->rx_max_key_sz_in_bits / 8;
725 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =
726 parms->rx_num_flows_in_k * TF_KILOBYTE;
727 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =
728 parms->rx_max_key_sz_in_bits / 8;
730 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =
731 parms->rx_num_flows_in_k * TF_KILOBYTE;
732 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =
733 parms->rx_max_action_entry_sz_in_bits / 8;
735 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =
739 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =
740 parms->tx_num_flows_in_k * TF_KILOBYTE;
741 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =
742 parms->tx_max_key_sz_in_bits / 8;
744 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =
745 parms->tx_num_flows_in_k * TF_KILOBYTE;
746 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =
747 parms->tx_max_key_sz_in_bits / 8;
749 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =
750 parms->tx_num_flows_in_k * TF_KILOBYTE;
751 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =
752 parms->tx_max_action_entry_sz_in_bits / 8;
754 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =
761 * Internal function to set a Table Entry. Supports all internal Table Types
764 * Pointer to TruFlow handle
767 * Pointer to input parameters
771 * -EINVAL - Parameter error
/*
 * Set an internal table entry: look up the RM pool for the type,
 * reject unsupported types, convert the caller index to RM base,
 * verify the slot was previously allocated, then write the entry via
 * the firmware message layer.  (Lines are elided in this extract.)
 */
774 tf_set_tbl_entry_internal(struct tf *tfp,
775 struct tf_set_tbl_entry_parms *parms)
780 struct bitalloc *session_pool;
781 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
783 /* Lookup the pool using the table type of the element */
784 rc = tf_rm_lookup_tbl_type_pool(tfs,
788 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
/* Only these action-table types are settable via this path. */
794 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
795 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
796 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
798 "dir:%d, Type not supported, type:%d\n",
804 /* Adjust the returned index/offset as there is no guarantee
805 * that the start is 0 at time of RM allocation
807 tf_rm_convert_index(tfs,
810 TF_RM_CONVERT_RM_BASE,
814 /* Verify that the entry has been previously allocated */
815 id = ba_inuse(session_pool, index);
818 "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
826 rc = tf_msg_set_tbl_entry(tfp,
829 parms->data_sz_in_bytes,
834 "dir:%d, Set failed, type:%d, rc:%d\n",
844 * Internal function to get a Table Entry. Supports all Table Types
845 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
848 * Pointer to TruFlow handle
851 * Pointer to input parameters
855 * -EINVAL - Parameter error
/*
 * Get an internal table entry: mirror of tf_set_tbl_entry_internal -
 * pool lookup, index conversion to RM base, allocation check, then a
 * firmware read of the entry.  (Lines are elided in this extract.)
 */
858 tf_get_tbl_entry_internal(struct tf *tfp,
859 struct tf_get_tbl_entry_parms *parms)
864 struct bitalloc *session_pool;
865 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
867 /* Lookup the pool using the table type of the element */
868 rc = tf_rm_lookup_tbl_type_pool(tfs,
872 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
878 /* Adjust the returned index/offset as there is no guarantee
879 * that the start is 0 at time of RM allocation
881 tf_rm_convert_index(tfs,
884 TF_RM_CONVERT_RM_BASE,
888 /* Verify that the entry has been previously allocated */
889 id = ba_inuse(session_pool, index);
892 "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
900 rc = tf_msg_get_tbl_entry(tfp,
903 parms->data_sz_in_bytes,
908 "dir:%d, Get failed, type:%d, rc:%d\n",
919 * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
920 * the requested entry. If found the ref count is incremente and
926 * Allocation parameters
929 * 0 - Success, entry found and ref count incremented
930 * -ENOENT - Failure, entry not found
/*
 * Shadow-DB alloc-with-search stub: logs that the feature is not
 * supported (the elided return presumably reports an error).
 */
933 tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
934 struct tf_alloc_tbl_entry_parms *parms __rte_unused)
937 "dir:%d, Entry Alloc with search not supported\n",
945 * Free Tbl entry from the Shadow DB. Shadow DB is searched for
946 * the requested entry. If found the ref count is decremente and
947 * new ref_count returned.
952 * Allocation parameters
955 * 0 - Success, entry found and ref count decremented
956 * -ENOENT - Failure, entry not found
/*
 * Shadow-DB free-with-search stub: logs that the feature is not
 * supported (the elided return presumably reports an error).
 */
959 tf_free_tbl_entry_shadow(struct tf_session *tfs,
960 struct tf_free_tbl_entry_parms *parms)
963 "dir:%d, Entry Free with search not supported\n",
968 #endif /* TF_SHADOW */
971 * Create External Tbl pool of memory indexes.
976 * pointer to the table scope
978 * number of entries to write
979 * [in] entry_sz_bytes
983 * 0 - Success, entry allocated - no search support
984 * -ENOMEM -EINVAL -EOPNOTSUPP
985 * - Failure, entry not allocated, out of resources
/*
 * Build the external-action index pool for one direction: allocate a
 * backing array, initialize a stack over it, then push byte offsets
 * in descending order so pops return ascending offsets.  The backing
 * memory is saved in ext_act_pool_mem[] and freed when the table
 * scope is destroyed; the error path at the end frees it directly.
 * (Lines are elided in this extract.)
 */
988 tf_create_tbl_pool_external(enum tf_dir dir,
989 struct tf_tbl_scope_cb *tbl_scope_cb,
990 uint32_t num_entries,
991 uint32_t entry_sz_bytes)
993 struct tfp_calloc_parms parms;
997 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
999 parms.nitems = num_entries;
1000 parms.size = sizeof(uint32_t);
1001 parms.alignment = 0;
1003 if (tfp_calloc(&parms) != 0) {
1004 PMD_DRV_LOG(ERR, "%d: TBL: external pool failure %s\n",
1005 dir, strerror(-ENOMEM));
1009 /* Create empty stack
1011 rc = stack_init(num_entries, parms.mem_va, pool);
1014 PMD_DRV_LOG(ERR, "%d: TBL: stack init failure %s\n",
1015 dir, strerror(-rc));
1019 /* Save the malloced memory address so that it can
1020 * be freed when the table scope is freed.
1022 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
1024 /* Fill pool with indexes in reverse
/* Offsets are in bytes: entry i lives at i * entry_sz_bytes. */
1026 j = (num_entries - 1) * entry_sz_bytes;
1028 for (i = 0; i < num_entries; i++) {
1029 rc = stack_push(pool, j);
1031 PMD_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
1032 tf_dir_2_str(dir), strerror(-rc));
1037 PMD_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
1041 j -= entry_sz_bytes;
/* Sanity check: every offset must have been pushed. */
1044 if (!stack_is_full(pool)) {
1046 PMD_DRV_LOG(ERR, "%d TBL: stack failure %s\n",
1047 dir, strerror(-rc));
/* Error path: release the backing array. */
1052 tfp_free((void *)parms.mem_va);
1057 * Destroy External Tbl pool of memory indexes.
1062 * pointer to the table scope
/*
 * Free the backing memory of the external-action index pool for one
 * direction (allocated in tf_create_tbl_pool_external).
 */
1066 tf_destroy_tbl_pool_external(enum tf_dir dir,
1067 struct tf_tbl_scope_cb *tbl_scope_cb)
1069 uint32_t *ext_act_pool_mem =
1070 tbl_scope_cb->ext_act_pool_mem[dir];
1072 tfp_free(ext_act_pool_mem);
1076 * Allocate External Tbl entry from the Session Pool.
1079 * Pointer to Truflow Handle
1081 * Allocation parameters
1084 * 0 - Success, entry allocated - no search support
1085 * -ENOMEM -EINVAL -EOPNOTSUPP
1086 * - Failure, entry not allocated, out of resources
/*
 * Allocate an external table entry: validate handles, locate the
 * table scope control block, then pop one byte offset from that
 * scope's external action pool.  (Lines are elided in this extract.)
 */
1089 tf_alloc_tbl_entry_pool_external(struct tf *tfp,
1090 struct tf_alloc_tbl_entry_parms *parms)
1094 struct tf_session *tfs;
1095 struct tf_tbl_scope_cb *tbl_scope_cb;
1098 /* Check parameters */
1099 if (tfp == NULL || parms == NULL) {
1100 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1104 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1106 "dir:%d, Session info invalid\n",
1111 tfs = (struct tf_session *)(tfp->session->core_data);
1113 /* Get the pool info from the table scope
1115 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1117 if (tbl_scope_cb == NULL) {
1119 "%s, table scope not allocated\n",
1120 tf_dir_2_str(parms->dir));
1123 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
1125 /* Allocate an element
/* Pop the next free byte offset from the pool stack. */
1127 rc = stack_pop(pool, &index);
1131 "dir:%d, Allocation failed, type:%d\n",
1141 * Allocate Internal Tbl entry from the Session Pool.
1144 * Pointer to Truflow Handle
1146 * Allocation parameters
1149 * 0 - Success, entry found and ref count decremented
1150 * -ENOMEM - Failure, entry not allocated, out of resources
/*
 * Allocate an internal table entry: validate handles and type, look
 * up the RM pool, grab a bit from the bitalloc pool (logging the
 * free count on exhaustion), and convert the RM index to the
 * caller-visible base.  (Lines are elided in this extract.)
 */
1153 tf_alloc_tbl_entry_pool_internal(struct tf *tfp,
1154 struct tf_alloc_tbl_entry_parms *parms)
1160 struct bitalloc *session_pool;
1161 struct tf_session *tfs;
1163 /* Check parameters */
1164 if (tfp == NULL || parms == NULL) {
1165 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1169 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1171 "dir:%d, Session info invalid\n",
1176 tfs = (struct tf_session *)(tfp->session->core_data);
/* Supported internal action-table types for allocation. */
1178 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1179 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1180 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1181 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1182 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1183 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1184 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1186 "dir:%d, Type not supported, type:%d\n",
1192 /* Lookup the pool using the table type of the element */
1193 rc = tf_rm_lookup_tbl_type_pool(tfs,
1197 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1201 id = ba_alloc(session_pool);
/* On failure, report how many entries remain free. */
1203 free_cnt = ba_free_count(session_pool);
1206 "dir:%d, Allocation failed, type:%d, free:%d\n",
1213 /* Adjust the returned index/offset as there is no guarantee
1214 * that the start is 0 at time of RM allocation
1216 tf_rm_convert_index(tfs,
1219 TF_RM_CONVERT_ADD_BASE,
1227 * Free External Tbl entry to the session pool.
1230 * Pointer to Truflow Handle
1232 * Allocation parameters
1235 * 0 - Success, entry freed
1237 * - Failure, entry not successfully freed for these reasons
/*
 * Free an external table entry: validate handles, locate the table
 * scope, then push the entry's byte offset back onto the external
 * action pool stack; a full stack indicates a double free /
 * consistency error.  (Lines are elided in this extract.)
 */
1243 tf_free_tbl_entry_pool_external(struct tf *tfp,
1244 struct tf_free_tbl_entry_parms *parms)
1247 struct tf_session *tfs;
1249 struct tf_tbl_scope_cb *tbl_scope_cb;
1252 /* Check parameters */
1253 if (tfp == NULL || parms == NULL) {
1254 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1258 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1260 "dir:%d, Session info invalid\n",
1265 tfs = (struct tf_session *)(tfp->session->core_data);
1267 /* Get the pool info from the table scope
1269 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1271 if (tbl_scope_cb == NULL) {
1273 "dir:%d, Session info invalid\n",
1277 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* Return the offset to the free pool. */
1281 rc = stack_push(pool, index);
1285 "dir:%d, consistency error, stack full, type:%d, idx:%d\n",
1294 * Free Internal Tbl entry from the Session Pool.
1297 * Pointer to Truflow Handle
1299 * Allocation parameters
1302 * 0 - Success, entry found and ref count decremented
1303 * -ENOMEM - Failure, entry not allocated, out of resources
/*
 * Free an internal table entry: validate handles and type, look up
 * the RM pool, convert the caller index to RM base, then release the
 * bit - ba_inuse_free() also detects freeing a slot that was never
 * allocated.  (Lines are elided in this extract.)
 */
1306 tf_free_tbl_entry_pool_internal(struct tf *tfp,
1307 struct tf_free_tbl_entry_parms *parms)
1311 struct bitalloc *session_pool;
1312 struct tf_session *tfs;
1315 /* Check parameters */
1316 if (tfp == NULL || parms == NULL) {
1317 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1321 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1323 "dir:%d, Session info invalid\n",
1328 tfs = (struct tf_session *)(tfp->session->core_data);
/* Same supported type set as the internal alloc path. */
1330 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1331 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1332 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1333 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1334 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1335 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1336 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1338 "dir:%d, Type not supported, type:%d\n",
1344 /* Lookup the pool using the table type of the element */
1345 rc = tf_rm_lookup_tbl_type_pool(tfs,
1349 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1355 /* Adjust the returned index/offset as there is no guarantee
1356 * that the start is 0 at time of RM allocation
1358 tf_rm_convert_index(tfs,
1361 TF_RM_CONVERT_RM_BASE,
1365 /* Check if element was indeed allocated */
1366 id = ba_inuse_free(session_pool, index);
1369 "dir:%d, Element not previously alloc'ed, type:%d, idx:%d\n",
1379 /* API defined in tf_em.h */
/*
 * Look up a table scope control block by id: first verify the id is
 * allocated in the Rx scope pool, then linear-search the session's
 * scope array for a matching id.  Returns the control block pointer,
 * or (per the elided tail) presumably NULL when not found.
 */
1380 struct tf_tbl_scope_cb *
1381 tbl_scope_cb_find(struct tf_session *session,
1382 uint32_t tbl_scope_id)
1386 /* Check that id is valid */
1387 i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
1391 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
1392 if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
1393 return &session->tbl_scopes[i];
1399 /* API defined in tf_core.h */
/*
 * Free an EEM table scope: locate its control block, release the
 * scope id back to the pool, and for each direction destroy the
 * external index pool, disable EEM in firmware (HWRM op, elided
 * call site), and unregister/free the EM context memory.
 * (Lines are elided in this extract.)
 */
1401 tf_free_eem_tbl_scope_cb(struct tf *tfp,
1402 struct tf_free_tbl_scope_parms *parms)
1406 struct tf_tbl_scope_cb *tbl_scope_cb;
1407 struct tf_session *session;
1409 session = (struct tf_session *)(tfp->session->core_data);
1411 tbl_scope_cb = tbl_scope_cb_find(session,
1412 parms->tbl_scope_id);
1414 if (tbl_scope_cb == NULL)
1417 /* Free Table control block */
1418 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1420 /* free table scope locks */
1421 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1422 /* Free associated external pools
1424 tf_destroy_tbl_pool_external(dir,
1428 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1430 /* free table scope and all associated resources */
1431 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
1437 /* API defined in tf_em.h */
/*
 * Allocate an EEM table scope: grab a scope id from the session
 * pool, query firmware EEM capabilities per direction, validate the
 * requested sizes, then per direction register EM context memory,
 * configure and enable EEM in firmware, and build the external
 * action index pool.  On failure the elided error path frees the
 * scope via tf_free_eem_tbl_scope_cb() or releases the control
 * block.  (Lines are elided in this extract.)
 */
1439 tf_alloc_eem_tbl_scope(struct tf *tfp,
1440 struct tf_alloc_tbl_scope_parms *parms)
1444 struct tf_tbl_scope_cb *tbl_scope_cb;
1445 struct tf_em_table *em_tables;
1447 struct tf_session *session;
1448 struct tf_free_tbl_scope_parms free_parms;
1450 /* check parameters */
1451 if (parms == NULL || tfp->session == NULL) {
1452 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1456 session = (struct tf_session *)tfp->session->core_data;
1458 /* Get Table Scope control block from the session pool */
1459 index = ba_alloc(session->tbl_scope_pool_rx);
1461 PMD_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
/* The pool index doubles as the public table scope id. */
1466 tbl_scope_cb = &session->tbl_scopes[index];
1467 tbl_scope_cb->index = index;
1468 tbl_scope_cb->tbl_scope_id = index;
1469 parms->tbl_scope_id = index;
1471 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1472 rc = tf_msg_em_qcaps(tfp,
1474 &tbl_scope_cb->em_caps[dir]);
1477 "EEM: Unable to query for EEM capability\n");
1483 * Validate and setup table sizes
1485 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1488 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1490 * Allocate tables and signal configuration to FW
1492 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1495 "EEM: Unable to register for EEM ctx\n");
1499 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
/* Hand firmware the context ids of all four EM tables. */
1500 rc = tf_msg_em_cfg(tfp,
1501 em_tables[KEY0_TABLE].num_entries,
1502 em_tables[KEY0_TABLE].ctx_id,
1503 em_tables[KEY1_TABLE].ctx_id,
1504 em_tables[RECORD_TABLE].ctx_id,
1505 em_tables[EFC_TABLE].ctx_id,
1506 parms->hw_flow_cache_flush_timer,
1510 "TBL: Unable to configure EEM in firmware\n");
1514 rc = tf_msg_em_op(tfp,
1516 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1520 "EEM: Unable to enable EEM in firmware\n");
1524 /* Allocate the pool of offsets of the external memory.
1525 * Initially, this is a single fixed size pool for all external
1526 * actions related to a single table scope.
1528 rc = tf_create_tbl_pool_external(dir,
1530 em_tables[RECORD_TABLE].num_entries,
1531 em_tables[RECORD_TABLE].entry_size);
1534 "%d TBL: Unable to allocate idx pools %s\n",
/* Deep-cleanup path: free everything tied to this scope. */
1544 free_parms.tbl_scope_id = index;
1545 tf_free_eem_tbl_scope_cb(tfp, &free_parms);
1549 /* Free Table control block */
1550 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1554 /* API defined in tf_core.h */
/*
 * Public set-entry API: for TF_TBL_TYPE_EXT, resolve the table scope,
 * locate the host page backing parms->idx and memcpy the data in
 * (external tables are host memory, implicitly the action table);
 * all other types are delegated to tf_set_tbl_entry_internal().
 * (Lines are elided in this extract.)
 */
1556 tf_set_tbl_entry(struct tf *tfp,
1557 struct tf_set_tbl_entry_parms *parms)
1560 struct tf_tbl_scope_cb *tbl_scope_cb;
1561 struct tf_session *session;
1563 if (tfp == NULL || parms == NULL || parms->data == NULL)
1566 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1568 "dir:%d, Session info invalid\n",
1573 if (parms->type == TF_TBL_TYPE_EXT) {
1575 uint32_t offset = parms->idx;
1576 uint32_t tbl_scope_id;
1578 session = (struct tf_session *)(tfp->session->core_data);
1580 tbl_scope_id = parms->tbl_scope_id;
1582 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1584 "dir:%d, Table scope not allocated\n",
1589 /* Get the table scope control block associated with the
1592 tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);
1594 if (tbl_scope_cb == NULL)
1597 /* External table, implicitly the Action table */
1598 base_addr = tf_em_get_table_page(tbl_scope_cb,
1602 if (base_addr == NULL) {
1604 "dir:%d, Base address lookup failed\n",
/* idx is a byte offset; keep only the within-page part. */
1609 offset %= TF_EM_PAGE_SIZE;
1610 rte_memcpy((char *)base_addr + offset,
1612 parms->data_sz_in_bytes);
1614 /* Internal table type processing */
1615 rc = tf_set_tbl_entry_internal(tfp, parms);
1618 "dir:%d, Set failed, type:%d, rc:%d\n",
1628 /* API defined in tf_core.h */
/*
 * Public get-entry API: external tables cannot be read through this
 * path; internal types are delegated to tf_get_tbl_entry_internal().
 * (Lines are elided in this extract.)
 */
1630 tf_get_tbl_entry(struct tf *tfp,
1631 struct tf_get_tbl_entry_parms *parms)
1635 if (tfp == NULL || parms == NULL)
1638 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1640 "dir:%d, Session info invalid\n",
1645 if (parms->type == TF_TBL_TYPE_EXT) {
1647 "dir:%d, External table type not supported\n",
1652 /* Internal table type processing */
1653 rc = tf_get_tbl_entry_internal(tfp, parms);
1656 "dir:%d, Get failed, type:%d, rc:%d\n",
1665 /* API defined in tf_core.h */
/*
 * Public table-scope allocation API: parameter checks, then a thin
 * delegation to tf_alloc_eem_tbl_scope().
 */
1667 tf_alloc_tbl_scope(struct tf *tfp,
1668 struct tf_alloc_tbl_scope_parms *parms)
1672 /* check parameters */
1673 if (parms == NULL || tfp == NULL) {
1674 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1678 rc = tf_alloc_eem_tbl_scope(tfp, parms);
1683 /* API defined in tf_core.h */
/*
 * Public table-scope free API: parameter checks, then a thin
 * delegation to tf_free_eem_tbl_scope_cb().
 */
1685 tf_free_tbl_scope(struct tf *tfp,
1686 struct tf_free_tbl_scope_parms *parms)
1690 /* check parameters */
1691 if (parms == NULL || tfp == NULL) {
1692 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1696 /* free table scope and all associated resources */
1697 rc = tf_free_eem_tbl_scope_cb(tfp, parms);
1702 /* API defined in tf_core.h */
/*
 * Public entry allocation API: external types go straight to the
 * external pool (no shadow support); otherwise, when TF_SHADOW is
 * compiled in and enabled, the shadow DB is searched first, falling
 * back to the internal session pool.  (Lines are elided in this
 * extract.)
 */
1704 tf_alloc_tbl_entry(struct tf *tfp,
1705 struct tf_alloc_tbl_entry_parms *parms)
1708 #if (TF_SHADOW == 1)
1709 struct tf_session *tfs;
1710 #endif /* TF_SHADOW */
1712 /* Check parameters */
1713 if (parms == NULL || tfp == NULL) {
1714 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1718 * No shadow copy support for external tables, allocate and return
1720 if (parms->type == TF_TBL_TYPE_EXT) {
1721 rc = tf_alloc_tbl_entry_pool_external(tfp, parms);
1725 #if (TF_SHADOW == 1)
1726 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1728 "dir:%d, Session info invalid\n",
1733 tfs = (struct tf_session *)(tfp->session->core_data);
1735 /* Search the Shadow DB for requested element. If not found go
1736 * allocate one from the Session Pool
1738 if (parms->search_enable && tfs->shadow_copy) {
1739 rc = tf_alloc_tbl_entry_shadow(tfs, parms);
1740 /* Entry found and parms populated with return data */
1744 #endif /* TF_SHADOW */
1746 rc = tf_alloc_tbl_entry_pool_internal(tfp, parms);
1748 PMD_DRV_LOG(ERR, "dir%d, Alloc failed, rc:%d\n",
1755 /* API defined in tf_core.h */
/*
 * Public entry free API: external types go straight to the external
 * pool; otherwise, with TF_SHADOW enabled, the shadow DB is consulted
 * first, falling back to the internal session pool.  (Lines are
 * elided in this extract.)
 */
1757 tf_free_tbl_entry(struct tf *tfp,
1758 struct tf_free_tbl_entry_parms *parms)
1761 #if (TF_SHADOW == 1)
1762 struct tf_session *tfs;
1763 #endif /* TF_SHADOW */
1765 /* Check parameters */
1766 if (parms == NULL || tfp == NULL) {
1767 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1771 * No shadow of external tables so just free the entry
1773 if (parms->type == TF_TBL_TYPE_EXT) {
1774 rc = tf_free_tbl_entry_pool_external(tfp, parms);
1778 #if (TF_SHADOW == 1)
1779 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1781 "dir:%d, Session info invalid\n",
1786 tfs = (struct tf_session *)(tfp->session->core_data);
1788 /* Search the Shadow DB for requested element. If not found go
1789 * allocate one from the Session Pool
1791 if (parms->search_enable && tfs->shadow_copy) {
1792 rc = tf_free_tbl_entry_shadow(tfs, parms);
1793 /* Entry free'ed and parms populated with return data */
1797 #endif /* TF_SHADOW */
1799 rc = tf_free_tbl_entry_pool_internal(tfp, parms);
/* NOTE(review): this is the free path but the message says
 * "Alloc failed" - looks like copy/paste; should read "Free
 * failed".  Confirm against upstream fix.
 */
1802 PMD_DRV_LOG(ERR, "dir:%d, Alloc failed, rc:%d\n",
/*
 * Debug helper: print every PTE of a page-table level, decoding the
 * low flag bits (VALID / LAST) written by tf_em_link_page_table, and
 * flag entries that over-run the next level's page count.
 * (Lines are elided in this extract.)
 */
1810 tf_dump_link_page_table(struct tf_em_page_tbl *tp,
1811 struct tf_em_page_tbl *tp_next)
1818 printf("pg_count:%d pg_size:0x%x\n",
1821 for (i = 0; i < tp->pg_count; i++) {
1822 pg_va = tp->pg_va_tbl[i];
1823 printf("\t%p\n", (void *)pg_va);
1824 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
1825 printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
/* Low 3 bits carry the PTU PTE flags. */
1826 if (((pg_va[j] & 0x7) ==
1827 tfp_cpu_to_le_64(PTU_PTE_LAST |
1831 if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
1832 printf("** Invalid entry **\n");
1836 if (++k >= tp_next->pg_count) {
1837 printf("** Shouldn't get here **\n");
1844 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
1846 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
1848 struct tf_session *session;
1849 struct tf_tbl_scope_cb *tbl_scope_cb;
1850 struct tf_em_page_tbl *tp;
1851 struct tf_em_page_tbl *tp_next;
1852 struct tf_em_table *tbl;
1857 printf("called %s\n", __func__);
1859 /* find session struct */
1860 session = (struct tf_session *)tfp->session->core_data;
1862 /* find control block for table scope */
1863 tbl_scope_cb = tbl_scope_cb_find(session,
1865 if (tbl_scope_cb == NULL)
1866 PMD_DRV_LOG(ERR, "No table scope\n");
1868 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1869 printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
1871 for (j = KEY0_TABLE; j < MAX_TABLE; j++) {
1872 tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
1874 ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
1880 if (tbl->pg_tbl[0].pg_va_tbl &&
1881 tbl->pg_tbl[0].pg_pa_tbl)
1883 tbl->pg_tbl[0].pg_va_tbl[0],
1884 (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
1885 for (i = 0; i < tbl->num_lvl - 1; i++) {
1886 printf("Level:%d\n", i);
1887 tp = &tbl->pg_tbl[i];
1888 tp_next = &tbl->pg_tbl[i + 1];
1889 tf_dump_link_page_table(tp, tp_next);