1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
6 /* Truflow Table APIs and supporting code */
12 #include <sys/param.h>
13 #include <rte_common.h>
14 #include <rte_errno.h>
15 #include "hsi_struct_def_dpdk.h"
24 #include "tf_resources.h"
26 #include "tf_common.h"
28 #define PTU_PTE_VALID 0x1UL
29 #define PTU_PTE_LAST 0x2UL
30 #define PTU_PTE_NEXT_TO_LAST 0x4UL
32 /* Number of pointers per page_size */
33 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
35 #define TF_EM_PG_SZ_4K (1 << 12)
36 #define TF_EM_PG_SZ_8K (1 << 13)
37 #define TF_EM_PG_SZ_64K (1 << 16)
38 #define TF_EM_PG_SZ_256K (1 << 18)
39 #define TF_EM_PG_SZ_1M (1 << 20)
40 #define TF_EM_PG_SZ_2M (1 << 21)
41 #define TF_EM_PG_SZ_4M (1 << 22)
42 #define TF_EM_PG_SZ_1G (1 << 30)
44 #define TF_EM_CTX_ID_INVALID 0xFFFF
46 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
47 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
50 * Function to free a page table
53 * Pointer to the page table to free
/* Free one level of an EM page table: release each data page (skipping
 * and logging any NULL VA slot), then free the VA and PA pointer arrays.
 */
56 tf_em_free_pg_tbl(struct tf_em_page_tbl *tp)
60 for (i = 0; i < tp->pg_count; i++) {
61 if (!tp->pg_va_tbl[i]) {
/* NOTE(review): "%016" with PRIu64 zero-pads a *decimal* value; for a
 * table address PRIx64 was probably intended — confirm upstream.
 */
63 "No map for page %d table %016" PRIu64 "\n",
65 (uint64_t)(uintptr_t)tp);
69 tfp_free(tp->pg_va_tbl[i]);
/* Clear the slot so a double free of this level is harmless. */
70 tp->pg_va_tbl[i] = NULL;
74 tfp_free(tp->pg_va_tbl);
76 tfp_free(tp->pg_pa_tbl);
81 * Function to free an EM table
84 * Pointer to the EM table to free
/* Free every level of an EM table's page tables and reset the table's
 * level-0 address/size bookkeeping so the table reads as empty.
 */
87 tf_em_free_page_table(struct tf_em_table *tbl)
89 struct tf_em_page_tbl *tp;
90 for each allocated level, log and free it: */
92 for (i = 0; i < tbl->num_lvl; i++) {
96 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
101 tf_em_free_pg_tbl(tp);
/* Reset so subsequent sizing/alloc starts from a clean state. */
105 tbl->l0_dma_addr = 0;
107 tbl->num_data_pages = 0;
111 * Allocation of page tables
114 * Pointer to a TruFlow handle
117 * Page count to allocate
124 * -ENOMEM - Out of memory
/* Allocate one page-table level: the VA and PA pointer arrays sized for
 * pg_count entries, then pg_count aligned, zeroed data pages.  On any
 * allocation failure the partially built level is torn down via
 * tf_em_free_pg_tbl() and -ENOMEM is returned.
 */
127 tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,
132 struct tfp_calloc_parms parms;
/* First calloc: array of page VA pointers. */
134 parms.nitems = pg_count;
135 parms.size = sizeof(void *);
138 if (tfp_calloc(&parms) != 0)
141 tp->pg_va_tbl = parms.mem_va;
/* Second calloc: array of page PA (DMA) addresses; undo VA array on
 * failure to avoid a leak.
 */
143 if (tfp_calloc(&parms) != 0) {
144 tfp_free(tp->pg_va_tbl);
148 tp->pg_pa_tbl = parms.mem_va;
151 tp->pg_size = pg_size;
/* Allocate and record each aligned data page, zeroing its contents. */
153 for (i = 0; i < pg_count; i++) {
155 parms.size = pg_size;
156 parms.alignment = TF_EM_PAGE_ALIGNMENT;
158 if (tfp_calloc(&parms) != 0)
161 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
162 tp->pg_va_tbl[i] = parms.mem_va;
164 memset(tp->pg_va_tbl[i], 0, pg_size);
/* Error path: free everything allocated so far for this level. */
171 tf_em_free_pg_tbl(tp);
176 * Allocates EM page tables
179 * Table to allocate pages for
183 * -ENOMEM - Out of memory
/* Allocate the page tables for every level of an EM table.  Each level
 * is allocated with tf_em_alloc_pg_tbl(); on failure all previously
 * allocated levels are released via tf_em_free_page_table().
 */
186 tf_em_alloc_page_table(struct tf_em_table *tbl)
188 struct tf_em_page_tbl *tp;
193 for (i = 0; i < tbl->num_lvl; i++) {
194 tp = &tbl->pg_tbl[i];
196 rc = tf_em_alloc_pg_tbl(tp,
201 "Failed to allocate page table: lvl: %d\n",
/* Debug: dump VA/PA of every page just allocated at this level. */
206 for (j = 0; j < tp->pg_count; j++) {
208 "EEM: Allocated page table: size %u lvl %d cnt"
213 (uint32_t *)tp->pg_va_tbl[j],
214 (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
/* Cleanup path: unwind all levels allocated before the failure. */
220 tf_em_free_page_table(tbl);
225 * Links EM page tables
228 * Pointer to page table
231 * Pointer to the next page table
234 * Flag controlling if the page table is last
/* Link one page-table level to the next: walk every pointer slot of
 * every page in 'tp' and write the physical address of the next level's
 * pages OR'd with PTU validity flags.  When 'set_pte_last' is set (the
 * last indirection level) the final two next-level pages are tagged
 * NEXT_TO_LAST and LAST respectively, per the PTU walker's contract.
 */
237 tf_em_link_page_table(struct tf_em_page_tbl *tp,
238 struct tf_em_page_tbl *tp_next,
241 uint64_t *pg_pa = tp_next->pg_pa_tbl;
/* k indexes the next level's page array across all slots of all pages. */
248 for (i = 0; i < tp->pg_count; i++) {
249 pg_va = tp->pg_va_tbl[i];
251 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
252 if (k == tp_next->pg_count - 2 && set_pte_last)
253 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
254 else if (k == tp_next->pg_count - 1 && set_pte_last)
255 valid = PTU_PTE_LAST | PTU_PTE_VALID;
257 valid = PTU_PTE_VALID;
/* PTEs are little-endian on the wire. */
259 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
/* Stop once every next-level page has been linked. */
260 if (++k >= tp_next->pg_count)
267 * Setup a EM page table
270 * Pointer to EM page table
/* Wire up a multi-level EM table: link each level to the one below it
 * (marking PTE_LAST only when linking into the final, data level), then
 * record the level-0 VA/DMA address the firmware will be given.
 */
273 tf_em_setup_page_table(struct tf_em_table *tbl)
275 struct tf_em_page_tbl *tp_next;
276 struct tf_em_page_tbl *tp;
/* NOTE(review): bool initialized with 0 — 'false' would be idiomatic. */
277 bool set_pte_last = 0;
280 for (i = 0; i < tbl->num_lvl - 1; i++) {
281 tp = &tbl->pg_tbl[i];
282 tp_next = &tbl->pg_tbl[i + 1];
/* Only the link into the data level carries the LAST markers. */
283 if (i == tbl->num_lvl - 2)
285 tf_em_link_page_table(tp, tp_next, set_pte_last);
288 tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];
289 tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];
293 * Given the page size, size of each data item (entry size),
294 * and the total number of entries needed, determine the number
295 * of page table levels and the number of data pages required.
304 * Number of entries needed
306 * [out] num_data_pages
307 * Number of pages required
310 * Success - Number of EM page levels required
311 * -ENOMEM - Out of memory
/* Determine how many page-table levels are needed to map
 * num_entries * entry_size bytes with the given page size, and report
 * the number of data pages via *num_data_pages.  Each added level
 * multiplies the addressable data by MAX_PAGE_PTRS(page_size).
 */
314 tf_em_size_page_tbl_lvl(uint32_t page_size,
316 uint32_t num_entries,
317 uint64_t *num_data_pages)
/* One level 0 page can hold 'page_size' bytes of data directly. */
319 uint64_t lvl_data_size = page_size;
324 data_size = (uint64_t)num_entries * entry_size;
/* Grow level count until the level can address all required data. */
326 while (lvl_data_size < data_size) {
330 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
332 else if (lvl == PT_LVL_2)
333 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
334 MAX_PAGE_PTRS(page_size) * page_size;
339 *num_data_pages = roundup(data_size, page_size) / page_size;
345 * Return the number of page table pages needed to
346 * reference the given number of next level pages.
352 * Size of each EM page
355 * Number of EM page table pages
/* Number of page-table pages needed to hold pointers to 'num_pages'
 * next-level pages: ceil(num_pages / pointers-per-page).
 */
358 tf_em_page_tbl_pgcnt(uint32_t num_pages,
361 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
362 MAX_PAGE_PTRS(page_size);
367 * Given the number of data pages, page_size and the maximum
368 * number of page table levels (already determined), size
369 * the number of page table pages required at each level.
372 * Max number of levels
374 * [in] num_data_pages
375 * Number of EM data pages
/* Fill page_cnt[] with the page count required at each level, working
 * up from the data level: each higher level holds pointers to the pages
 * of the level below it.
 */
384 tf_em_size_page_tbls(int max_lvl,
385 uint64_t num_data_pages,
389 if (max_lvl == PT_LVL_0) {
/* Single level: data pages only. */
390 page_cnt[PT_LVL_0] = num_data_pages;
391 } else if (max_lvl == PT_LVL_1) {
392 page_cnt[PT_LVL_1] = num_data_pages;
394 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
395 } else if (max_lvl == PT_LVL_2) {
396 page_cnt[PT_LVL_2] = num_data_pages;
398 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);
400 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
407 * Size the EM table based on capabilities
414 * - EINVAL - Parameter error
415 * - ENOMEM - Out of memory
/* Size an EM table: derive the entry count, compute the number of
 * page-table levels and data pages, then fill tbl->num_lvl,
 * tbl->num_data_pages and tbl->page_cnt[].  A table with both
 * entry_size and num_entries zero is skipped; only one of the two being
 * zero is a parameter error.
 */
418 tf_em_size_table(struct tf_em_table *tbl)
420 uint64_t num_data_pages;
423 uint32_t num_entries;
424 uint32_t cnt = TF_EM_MIN_ENTRIES;
426 /* Ignore entry if both size and number are zero */
427 if (!tbl->entry_size && !tbl->num_entries)
430 /* If only one is set then error */
431 if (!tbl->entry_size || !tbl->num_entries)
434 /* Determine number of page table levels and the number
435 * of data pages needed to process the given eem table.
437 if (tbl->type == RECORD_TABLE) {
439 * For action records just a memory size is provided. Work
440 * backwards to resolve to number of entries
/* NOTE(review): here num_entries is treated as a byte size being
 * divided by entry_size — confirm RECORD_TABLE callers store bytes
 * in num_entries.
 */
442 num_entries = tbl->num_entries / tbl->entry_size;
443 if (num_entries < TF_EM_MIN_ENTRIES) {
444 num_entries = TF_EM_MIN_ENTRIES;
/* Round up to the next supported power-of-2 entry count. */
446 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
451 num_entries = tbl->num_entries;
454 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
459 PMD_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
/* NOTE(review): "%016" PRIu64 zero-pads decimal output; PRIx64 may
 * have been intended for the size dump — confirm.
 */
461 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
463 (uint64_t)num_entries * tbl->entry_size,
/* max_lvl is the highest level index; levels = index + 1. */
468 tbl->num_lvl = max_lvl + 1;
469 tbl->num_data_pages = num_data_pages;
471 /* Determine the number of pages needed at each level */
472 page_cnt = tbl->page_cnt;
473 memset(page_cnt, 0, sizeof(tbl->page_cnt));
474 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
477 PMD_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
479 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
481 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
491 * Unregisters EM Ctx in Firmware
494 * Pointer to a TruFlow handle
497 * Pointer to a table scope control block
500 * Receive or transmit direction
/* Unregister the EM memory contexts for one direction: for every sized
 * table (KEY0..EFC) tell the firmware to unregister its context, then
 * free the table's page tables.
 */
503 tf_em_ctx_unreg(struct tf *tfp,
504 struct tf_tbl_scope_cb *tbl_scope_cb,
507 struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
508 struct tf_em_table *tbl;
511 for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
512 tbl = &ctxp->em_tables[i];
/* Only tables that were actually allocated are torn down. */
514 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
515 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
516 tf_em_free_page_table(tbl);
522 * Registers EM Ctx in Firmware
525 * Pointer to a TruFlow handle
528 * Pointer to a table scope control block
531 * Receive or transmit direction
535 * -ENOMEM - Out of Memory
/* Register the EM memory contexts for one direction: for every table
 * with a nonzero size, size it, allocate and link its page tables, then
 * register the memory with firmware.  On any failure all contexts for
 * this direction are unwound via tf_em_ctx_unreg().
 */
538 tf_em_ctx_reg(struct tf *tfp,
539 struct tf_tbl_scope_cb *tbl_scope_cb,
542 struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
543 struct tf_em_table *tbl;
547 for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
548 tbl = &ctxp->em_tables[i];
550 if (tbl->num_entries && tbl->entry_size) {
551 rc = tf_em_size_table(tbl);
556 rc = tf_em_alloc_page_table(tbl);
560 tf_em_setup_page_table(tbl);
561 rc = tf_msg_em_mem_rgtr(tfp,
563 TF_EM_PAGE_SIZE_ENUM,
/* Cleanup path: unregister/free everything done so far. */
573 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
578 * Validates EM number of entries requested
581 * Pointer to table scope control block to be populated
584 * Pointer to input parameters
588 * -EINVAL - Parameter error
/* Validate and normalize the requested EEM sizes.  For each direction:
 * if a memory size in MB is given, derive the flow count from the
 * key/action record footprint, round it to a supported power-of-2 in
 * [32K, 128M], and write it back as *_num_flows_in_k; then check the
 * result against the firmware-reported min/max capabilities.  Finally
 * populate the per-direction KEY0/KEY1/RECORD/EFC table geometry in
 * tbl_scope_cb.  Returns -EINVAL on any out-of-range request.
 */
591 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
592 struct tf_alloc_tbl_scope_parms *parms)
/* --- Rx sizing from rx_mem_size_in_mb --- */
596 if (parms->rx_mem_size_in_mb != 0) {
/* Footprint per flow: two key copies (+1 byte rounding) plus one
 * action record.
 */
597 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
598 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
600 uint32_t num_entries = (parms->rx_mem_size_in_mb *
601 TF_MEGABYTE) / (key_b + action_b);
603 if (num_entries < TF_EM_MIN_ENTRIES) {
604 PMD_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
606 parms->rx_mem_size_in_mb);
/* Round up to next supported power-of-2. */
610 cnt = TF_EM_MIN_ENTRIES;
611 while (num_entries > cnt &&
612 cnt <= TF_EM_MAX_ENTRIES)
615 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): this is the Rx path but the message and the value
 * logged are Tx ("Tx requested", tx_num_flows_in_k) — looks like a
 * copy/paste defect; confirm against upstream fix.
 */
616 PMD_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
618 (parms->tx_num_flows_in_k * TF_KILOBYTE));
622 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Check against firmware-reported Rx capability window. */
624 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
626 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
627 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
629 "EEM: Invalid number of Rx flows "
630 "requested:%u max:%u\n",
631 parms->rx_num_flows_in_k * TF_KILOBYTE,
632 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
636 /* must be a power-of-2 supported value
637 * in the range 32K - 128M
639 cnt = TF_EM_MIN_ENTRIES;
640 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
641 cnt <= TF_EM_MAX_ENTRIES)
643 if no exact power-of-2 match was found the request is invalid: */
644 if (cnt > TF_EM_MAX_ENTRIES) {
646 "EEM: Invalid number of Rx requested: %u\n",
647 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* --- Tx sizing from tx_mem_size_in_mb (mirrors the Rx path) --- */
652 if (parms->tx_mem_size_in_mb != 0) {
653 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
654 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
/* NOTE(review): Tx path spells the MB multiplier as
 * TF_KILOBYTE * TF_KILOBYTE while Rx uses TF_MEGABYTE — presumably
 * equal; unify for consistency.
 */
656 uint32_t num_entries = (parms->tx_mem_size_in_mb *
657 (TF_KILOBYTE * TF_KILOBYTE)) /
660 if (num_entries < TF_EM_MIN_ENTRIES) {
662 "EEM: Insufficient memory requested:%uMB\n",
/* NOTE(review): Tx path logs rx_mem_size_in_mb — copy/paste
 * defect; should presumably be tx_mem_size_in_mb.
 */
663 parms->rx_mem_size_in_mb);
667 cnt = TF_EM_MIN_ENTRIES;
668 while (num_entries > cnt &&
669 cnt <= TF_EM_MAX_ENTRIES)
672 if (cnt > TF_EM_MAX_ENTRIES) {
674 "EEM: Invalid number of Tx requested: %u\n",
675 (parms->tx_num_flows_in_k * TF_KILOBYTE));
679 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Check against firmware-reported Tx capability window. */
681 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
683 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
684 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
686 "EEM: Invalid number of Tx flows "
687 "requested:%u max:%u\n",
688 (parms->tx_num_flows_in_k * TF_KILOBYTE),
689 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
/* Tx flow count must also be an exact supported power-of-2. */
693 cnt = TF_EM_MIN_ENTRIES;
694 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
695 cnt <= TF_EM_MAX_ENTRIES)
698 if (cnt > TF_EM_MAX_ENTRIES) {
700 "EEM: Invalid number of Tx requested: %u\n",
701 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A nonzero flow count with a zero-byte key is unusable. */
706 if (parms->rx_num_flows_in_k != 0 &&
707 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
709 "EEM: Rx key size required: %u\n",
710 (parms->rx_max_key_sz_in_bits));
714 if (parms->tx_num_flows_in_k != 0 &&
715 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
717 "EEM: Tx key size required: %u\n",
718 (parms->tx_max_key_sz_in_bits));
/* --- Populate per-direction table geometry in the scope cb --- */
722 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =
723 parms->rx_num_flows_in_k * TF_KILOBYTE;
724 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =
725 parms->rx_max_key_sz_in_bits / 8;
727 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =
728 parms->rx_num_flows_in_k * TF_KILOBYTE;
729 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =
730 parms->rx_max_key_sz_in_bits / 8;
732 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =
733 parms->rx_num_flows_in_k * TF_KILOBYTE;
734 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =
735 parms->rx_max_action_entry_sz_in_bits / 8;
737 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =
741 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =
742 parms->tx_num_flows_in_k * TF_KILOBYTE;
743 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =
744 parms->tx_max_key_sz_in_bits / 8;
746 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =
747 parms->tx_num_flows_in_k * TF_KILOBYTE;
748 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =
749 parms->tx_max_key_sz_in_bits / 8;
751 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =
752 parms->tx_num_flows_in_k * TF_KILOBYTE;
753 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =
754 parms->tx_max_action_entry_sz_in_bits / 8;
756 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =
763 * Internal function to set a Table Entry. Supports all internal Table Types
766 * Pointer to TruFlow handle
769 * Pointer to input parameters
773 * -EINVAL - Parameter error
/* Set an internal table entry: look up the RM pool for the type,
 * reject unsupported types, convert the caller's index to an RM-base
 * index, verify it was previously allocated, then send the set message
 * to firmware.
 */
776 tf_set_tbl_entry_internal(struct tf *tfp,
777 struct tf_set_tbl_entry_parms *parms)
782 struct bitalloc *session_pool;
783 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
785 /* Lookup the pool using the table type of the element */
786 rc = tf_rm_lookup_tbl_type_pool(tfs,
790 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
/* Only this subset of internal types supports direct set. */
796 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
797 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
798 parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
799 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
801 "dir:%d, Type not supported, type:%d\n",
807 /* Adjust the returned index/offset as there is no guarantee
808 * that the start is 0 at time of RM allocation
810 tf_rm_convert_index(tfs,
813 TF_RM_CONVERT_RM_BASE,
817 /* Verify that the entry has been previously allocated */
818 id = ba_inuse(session_pool, index);
821 "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
/* Hand the entry data to firmware. */
829 rc = tf_msg_set_tbl_entry(tfp,
832 parms->data_sz_in_bytes,
837 "dir:%d, Set failed, type:%d, rc:%d\n",
847 * Internal function to get a Table Entry. Supports all Table Types
848 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
851 * Pointer to TruFlow handle
854 * Pointer to input parameters
858 * -EINVAL - Parameter error
/* Get an internal table entry: look up the RM pool, convert the index
 * to RM base, verify it is allocated, then fetch the data from firmware.
 * TF_TBL_TYPE_EXT is handled elsewhere via the table scope.
 */
861 tf_get_tbl_entry_internal(struct tf *tfp,
862 struct tf_get_tbl_entry_parms *parms)
867 struct bitalloc *session_pool;
868 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
870 /* Lookup the pool using the table type of the element */
871 rc = tf_rm_lookup_tbl_type_pool(tfs,
875 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
881 /* Adjust the returned index/offset as there is no guarantee
882 * that the start is 0 at time of RM allocation
884 tf_rm_convert_index(tfs,
887 TF_RM_CONVERT_RM_BASE,
891 /* Verify that the entry has been previously allocated */
892 id = ba_inuse(session_pool, index);
895 "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
/* Read the entry contents back from firmware. */
903 rc = tf_msg_get_tbl_entry(tfp,
906 parms->data_sz_in_bytes,
911 "dir:%d, Get failed, type:%d, rc:%d\n",
921 * Internal function to get a Table Entry. Supports all Table Types
922 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
925 * Pointer to TruFlow handle
928 * Pointer to input parameters
932 * -EINVAL - Parameter error
/* Bulk-get internal table entries: verify the starting index of the
 * requested range is allocated in the RM pool, then issue a single bulk
 * get message to firmware for the whole range.
 */
935 tf_get_bulk_tbl_entry_internal(struct tf *tfp,
936 struct tf_get_bulk_tbl_entry_parms *parms)
941 struct bitalloc *session_pool;
942 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
944 /* Lookup the pool using the table type of the element */
945 rc = tf_rm_lookup_tbl_type_pool(tfs,
949 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
953 index = parms->starting_idx;
956 * Adjust the returned index/offset as there is no guarantee
957 * that the start is 0 at time of RM allocation
959 tf_rm_convert_index(tfs,
962 TF_RM_CONVERT_RM_BASE,
966 /* Verify that the entry has been previously allocated */
/* NOTE(review): only the first index of the range is checked; entries
 * later in the range could be unallocated — confirm intended.
 */
967 id = ba_inuse(session_pool, index);
970 "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
971 tf_dir_2_str(parms->dir),
978 rc = tf_msg_get_bulk_tbl_entry(tfp, parms);
981 "%s, Bulk get failed, type:%d, rc:%s\n",
982 tf_dir_2_str(parms->dir),
992 * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
993 * the requested entry. If found the ref count is incremented and
999 * Allocation parameters
1002 * 0 - Success, entry found and ref count incremented
1003 * -ENOENT - Failure, entry not found
/* Shadow-DB alloc-with-search stub: not implemented, always logs and
 * fails.  Parameters are unused, hence __rte_unused.
 */
1006 tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
1007 struct tf_alloc_tbl_entry_parms *parms __rte_unused)
1010 "dir:%d, Entry Alloc with search not supported\n",
1018 * Free Tbl entry from the Shadow DB. Shadow DB is searched for
1019 * the requested entry. If found the ref count is decremented and
1020 * new ref_count returned.
1023 * Pointer to session
1025 * Allocation parameters
1028 * 0 - Success, entry found and ref count decremented
1029 * -ENOENT - Failure, entry not found
/* Shadow-DB free-with-search stub: not implemented, always logs and
 * fails.  Compiled only when TF_SHADOW is enabled.
 */
1032 tf_free_tbl_entry_shadow(struct tf_session *tfs,
1033 struct tf_free_tbl_entry_parms *parms)
1036 "dir:%d, Entry Free with search not supported\n",
1041 #endif /* TF_SHADOW */
1044 * Create External Tbl pool of memory indexes.
1049 * pointer to the table scope
1051 * number of entries to write
1052 * [in] entry_sz_bytes
1053 * size of each entry
1056 * 0 - Success, entry allocated - no search support
1057 * -ENOMEM -EINVAL -EOPNOTSUPP
1058 * - Failure, entry not allocated, out of resources
/* Build the external action pool for one direction: allocate an index
 * array, initialize a stack over it, and push every record offset in
 * reverse order so the lowest offset pops first.  The backing memory is
 * saved in ext_act_pool_mem[] so the scope teardown can free it.
 */
1061 tf_create_tbl_pool_external(enum tf_dir dir,
1062 struct tf_tbl_scope_cb *tbl_scope_cb,
1063 uint32_t num_entries,
1064 uint32_t entry_sz_bytes)
1066 struct tfp_calloc_parms parms;
1070 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
1072 parms.nitems = num_entries;
1073 parms.size = sizeof(uint32_t);
1074 parms.alignment = 0;
1076 if (tfp_calloc(&parms) != 0) {
1077 PMD_DRV_LOG(ERR, "%d: TBL: external pool failure %s\n",
1078 dir, strerror(-ENOMEM));
1082 /* Create empty stack
1084 rc = stack_init(num_entries, parms.mem_va, pool);
1087 PMD_DRV_LOG(ERR, "%d: TBL: stack init failure %s\n",
1088 dir, strerror(-rc));
1092 /* Save the malloced memory address so that it can
1093 * be freed when the table scope is freed.
1095 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
1097 /* Fill pool with indexes in reverse
/* Start at the byte offset of the last record. */
1099 j = (num_entries - 1) * entry_sz_bytes;
1101 for (i = 0; i < num_entries; i++) {
1102 rc = stack_push(pool, j);
1104 PMD_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
1105 tf_dir_2_str(dir), strerror(-rc));
/* Sanity check: offset must not go negative before the last push. */
1110 PMD_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
1114 j -= entry_sz_bytes;
/* Full stack proves every index was pushed. */
1117 if (!stack_is_full(pool)) {
1119 PMD_DRV_LOG(ERR, "%d TBL: stack failure %s\n",
1120 dir, strerror(-rc));
/* Error path: release the pool memory. */
1125 tfp_free((void *)parms.mem_va);
1130 * Destroy External Tbl pool of memory indexes.
1135 * pointer to the table scope
/* Free the backing memory of one direction's external action pool.
 * NOTE(review): the saved pointer is not cleared afterwards — a second
 * destroy would double-free; confirm callers only invoke this once.
 */
1139 tf_destroy_tbl_pool_external(enum tf_dir dir,
1140 struct tf_tbl_scope_cb *tbl_scope_cb)
1142 uint32_t *ext_act_pool_mem =
1143 tbl_scope_cb->ext_act_pool_mem[dir];
1145 tfp_free(ext_act_pool_mem);
1149 * Allocate External Tbl entry from the Session Pool.
1152 * Pointer to Truflow Handle
1154 * Allocation parameters
1157 * 0 - Success, entry allocated - no search support
1158 * -ENOMEM -EINVAL -EOPNOTSUPP
1159 * - Failure, entry not allocated, out of resources
/* Allocate an external table entry by popping the next free record
 * offset from the table scope's per-direction stack pool.  Validates
 * handle/session and resolves the scope control block first.
 */
1162 tf_alloc_tbl_entry_pool_external(struct tf *tfp,
1163 struct tf_alloc_tbl_entry_parms *parms)
1167 struct tf_session *tfs;
1168 struct tf_tbl_scope_cb *tbl_scope_cb;
1171 /* Check parameters */
1172 if (tfp == NULL || parms == NULL) {
1173 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1177 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1179 "dir:%d, Session info invalid\n",
1184 tfs = (struct tf_session *)(tfp->session->core_data);
1186 /* Get the pool info from the table scope
1188 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1190 if (tbl_scope_cb == NULL) {
1192 "%s, table scope not allocated\n",
1193 tf_dir_2_str(parms->dir));
1196 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
1198 /* Allocate an element
/* Pop the next free record offset; empty pool means out of entries. */
1200 rc = stack_pop(pool, &index);
1204 "dir:%d, Allocation failed, type:%d\n",
1214 * Allocate Internal Tbl entry from the Session Pool.
1217 * Pointer to Truflow Handle
1219 * Allocation parameters
1222 * 0 - Success, entry found and ref count decremented
1223 * -ENOMEM - Failure, entry not allocated, out of resources
/* Allocate an internal table entry from the session's RM bitmap pool:
 * validate handle/session and supported type, ba_alloc() an id, then
 * convert the RM-relative id to the session-global index the caller
 * uses.
 */
1226 tf_alloc_tbl_entry_pool_internal(struct tf *tfp,
1227 struct tf_alloc_tbl_entry_parms *parms)
1233 struct bitalloc *session_pool;
1234 struct tf_session *tfs;
1236 /* Check parameters */
1237 if (tfp == NULL || parms == NULL) {
1238 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1242 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1244 "dir:%d, Session info invalid\n",
1249 tfs = (struct tf_session *)(tfp->session->core_data);
/* Internal types that support pool allocation. */
1251 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1252 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1253 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1254 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1255 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1256 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1257 parms->type != TF_TBL_TYPE_MIRROR_CONFIG &&
1258 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1260 "dir:%d, Type not supported, type:%d\n",
1266 /* Lookup the pool using the table type of the element */
1267 rc = tf_rm_lookup_tbl_type_pool(tfs,
1271 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1275 id = ba_alloc(session_pool);
/* On failure, report how many entries were left for diagnostics. */
1277 free_cnt = ba_free_count(session_pool);
1280 "dir:%d, Allocation failed, type:%d, free:%d\n",
1287 /* Adjust the returned index/offset as there is no guarantee
1288 * that the start is 0 at time of RM allocation
1290 tf_rm_convert_index(tfs,
1293 TF_RM_CONVERT_ADD_BASE,
1301 * Free External Tbl entry to the session pool.
1304 * Pointer to Truflow Handle
1306 * Allocation parameters
1309 * 0 - Success, entry freed
1311 * - Failure, entry not successfully freed for these reasons
/* Free an external table entry by pushing its record offset back onto
 * the table scope's per-direction stack pool.  A full stack here means
 * a double free or index corruption (consistency error).
 */
1317 tf_free_tbl_entry_pool_external(struct tf *tfp,
1318 struct tf_free_tbl_entry_parms *parms)
1321 struct tf_session *tfs;
1323 struct tf_tbl_scope_cb *tbl_scope_cb;
1326 /* Check parameters */
1327 if (tfp == NULL || parms == NULL) {
1328 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1332 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1334 "dir:%d, Session info invalid\n",
1339 tfs = (struct tf_session *)(tfp->session->core_data);
1341 /* Get the pool info from the table scope
1343 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1345 if (tbl_scope_cb == NULL) {
/* NOTE(review): message says "Session info invalid" but the failure is
 * an unresolved table scope — misleading log text.
 */
1347 "dir:%d, Session info invalid\n",
1351 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
1355 rc = stack_push(pool, index);
1359 "dir:%d, consistency error, stack full, type:%d, idx:%d\n",
1368 * Free Internal Tbl entry from the Session Pool.
1371 * Pointer to Truflow Handle
1373 * Allocation parameters
1376 * 0 - Success, entry found and ref count decremented
1377 * -ENOMEM - Failure, entry not allocated, out of resources
/* Free an internal table entry back to the session's RM bitmap pool:
 * validate handle/session and supported type, convert the index to RM
 * base, then ba_inuse_free() — which also detects freeing an entry that
 * was never allocated.
 */
1380 tf_free_tbl_entry_pool_internal(struct tf *tfp,
1381 struct tf_free_tbl_entry_parms *parms)
1385 struct bitalloc *session_pool;
1386 struct tf_session *tfs;
1389 /* Check parameters */
1390 if (tfp == NULL || parms == NULL) {
1391 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1395 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1397 "dir:%d, Session info invalid\n",
1402 tfs = (struct tf_session *)(tfp->session->core_data);
/* NOTE(review): unlike the alloc path this list omits
 * TF_TBL_TYPE_MIRROR_CONFIG — confirm whether mirror entries can be
 * freed through another path or this is an omission.
 */
1404 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1405 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1406 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1407 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1408 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1409 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1410 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1412 "dir:%d, Type not supported, type:%d\n",
1418 /* Lookup the pool using the table type of the element */
1419 rc = tf_rm_lookup_tbl_type_pool(tfs,
1423 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1429 /* Adjust the returned index/offset as there is no guarantee
1430 * that the start is 0 at time of RM allocation
1432 tf_rm_convert_index(tfs,
1435 TF_RM_CONVERT_RM_BASE,
1439 /* Check if element was indeed allocated */
1440 id = ba_inuse_free(session_pool, index);
1443 "dir:%d, Element not previously alloc'ed, type:%d, idx:%d\n",
1453 /* API defined in tf_em.h */
/* Resolve a table scope id to its control block.  The id is first
 * checked against the Rx scope bitalloc pool; if valid, the scope array
 * is searched linearly for a matching tbl_scope_id.  Returns NULL when
 * the id is invalid or not found.
 */
1454 struct tf_tbl_scope_cb *
1455 tbl_scope_cb_find(struct tf_session *session,
1456 uint32_t tbl_scope_id)
1460 /* Check that id is valid */
1461 i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
1465 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
1466 if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
1467 return &session->tbl_scopes[i];
1473 /* API defined in tf_core.h */
/* Tear down an EEM table scope: release its control-block id back to
 * the pool, and for each direction destroy the external action pool,
 * disable EEM in firmware, and unregister/free all EM contexts.
 */
1475 tf_free_eem_tbl_scope_cb(struct tf *tfp,
1476 struct tf_free_tbl_scope_parms *parms)
1480 struct tf_tbl_scope_cb *tbl_scope_cb;
1481 struct tf_session *session;
1483 session = (struct tf_session *)(tfp->session->core_data);
1485 tbl_scope_cb = tbl_scope_cb_find(session,
1486 parms->tbl_scope_id);
1488 if (tbl_scope_cb == NULL)
1491 /* Free Table control block */
1492 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1494 /* free table scope locks */
1495 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1496 /* Free associated external pools
1498 tf_destroy_tbl_pool_external(dir,
/* Disable EEM in firmware for this direction. */
1502 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1504 /* free table scope and all associated resources */
1505 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
1511 /* API defined in tf_em.h */
/* Allocate and bring up an EEM table scope: grab a scope id from the
 * session pool, query per-direction EEM capabilities, validate the
 * requested sizes, then for each direction register EM contexts,
 * configure and enable EEM in firmware, and build the external action
 * index pool.  On failure the scope is fully torn down.
 */
1513 tf_alloc_eem_tbl_scope(struct tf *tfp,
1514 struct tf_alloc_tbl_scope_parms *parms)
1518 struct tf_tbl_scope_cb *tbl_scope_cb;
1519 struct tf_em_table *em_tables;
1521 struct tf_session *session;
1522 struct tf_free_tbl_scope_parms free_parms;
1524 /* check parameters */
1525 if (parms == NULL || tfp->session == NULL) {
1526 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1530 session = (struct tf_session *)tfp->session->core_data;
1532 /* Get Table Scope control block from the session pool */
1533 index = ba_alloc(session->tbl_scope_pool_rx);
1535 PMD_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
/* The pool index doubles as the public scope id. */
1540 tbl_scope_cb = &session->tbl_scopes[index];
1541 tbl_scope_cb->index = index;
1542 tbl_scope_cb->tbl_scope_id = index;
1543 parms->tbl_scope_id = index;
/* Query firmware EEM capabilities for both directions. */
1545 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1546 rc = tf_msg_em_qcaps(tfp,
1548 &tbl_scope_cb->em_caps[dir]);
1551 "EEM: Unable to query for EEM capability\n");
1557 * Validate and setup table sizes
1559 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1562 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1564 * Allocate tables and signal configuration to FW
1566 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1569 "EEM: Unable to register for EEM ctx\n");
/* Push the context ids of all four tables into the EEM config. */
1573 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
1574 rc = tf_msg_em_cfg(tfp,
1575 em_tables[KEY0_TABLE].num_entries,
1576 em_tables[KEY0_TABLE].ctx_id,
1577 em_tables[KEY1_TABLE].ctx_id,
1578 em_tables[RECORD_TABLE].ctx_id,
1579 em_tables[EFC_TABLE].ctx_id,
1580 parms->hw_flow_cache_flush_timer,
1584 "TBL: Unable to configure EEM in firmware\n");
1588 rc = tf_msg_em_op(tfp,
1590 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1594 "EEM: Unable to enable EEM in firmware\n");
1598 /* Allocate the pool of offsets of the external memory.
1599 * Initially, this is a single fixed size pool for all external
1600 * actions related to a single table scope.
1602 rc = tf_create_tbl_pool_external(dir,
1604 em_tables[RECORD_TABLE].num_entries,
1605 em_tables[RECORD_TABLE].entry_size)
1608 "%d TBL: Unable to allocate idx pools %s\n",
/* Cleanup: free everything the scope acquired so far. */
1618 free_parms.tbl_scope_id = index;
1619 tf_free_eem_tbl_scope_cb(tfp, &free_parms);
1623 /* Free Table control block */
1624 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1628 /* API defined in tf_core.h */
/* Public set-entry API.  External (TF_TBL_TYPE_EXT) entries are written
 * directly into the scope's host-resident EEM pages (page lookup +
 * rte_memcpy at the in-page offset); all other types are delegated to
 * tf_set_tbl_entry_internal().
 */
1630 tf_set_tbl_entry(struct tf *tfp,
1631 struct tf_set_tbl_entry_parms *parms)
1634 struct tf_tbl_scope_cb *tbl_scope_cb;
1635 struct tf_session *session;
1637 if (tfp == NULL || parms == NULL || parms->data == NULL)
1640 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1642 "dir:%d, Session info invalid\n",
1647 if (parms->type == TF_TBL_TYPE_EXT) {
/* idx is a byte offset into external action memory. */
1649 uint32_t offset = parms->idx;
1650 uint32_t tbl_scope_id;
1652 session = (struct tf_session *)(tfp->session->core_data);
1654 tbl_scope_id = parms->tbl_scope_id;
1656 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1658 "dir:%d, Table scope not allocated\n",
1663 /* Get the table scope control block associated with the
1666 tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);
1668 if (tbl_scope_cb == NULL)
1671 /* External table, implicitly the Action table */
1672 base_addr = tf_em_get_table_page(tbl_scope_cb,
1676 if (base_addr == NULL) {
1678 "dir:%d, Base address lookup failed\n",
/* Reduce to the offset within the located page and copy the data. */
1683 offset %= TF_EM_PAGE_SIZE;
1684 rte_memcpy((char *)base_addr + offset,
1686 parms->data_sz_in_bytes);
1688 /* Internal table type processing */
1689 rc = tf_set_tbl_entry_internal(tfp, parms);
1692 "dir:%d, Set failed, type:%d, rc:%d\n",
1702 /* API defined in tf_core.h */
/* Public get-entry API.  External table reads are not supported;
 * everything else is delegated to tf_get_tbl_entry_internal().
 */
1704 tf_get_tbl_entry(struct tf *tfp,
1705 struct tf_get_tbl_entry_parms *parms)
1709 if (tfp == NULL || parms == NULL)
1712 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1714 "dir:%d, Session info invalid\n",
1719 if (parms->type == TF_TBL_TYPE_EXT) {
1721 "dir:%d, External table type not supported\n",
1726 /* Internal table type processing */
1727 rc = tf_get_tbl_entry_internal(tfp, parms);
1730 "dir:%d, Get failed, type:%d, rc:%d\n",
1739 /* API defined in tf_core.h */
/* Public bulk-get API.  External table types are rejected (not yet
 * supported); internal types are delegated to
 * tf_get_bulk_tbl_entry_internal().
 */
1741 tf_get_bulk_tbl_entry(struct tf *tfp,
1742 struct tf_get_bulk_tbl_entry_parms *parms)
/* Macro validates tfp, parms and the session in one step. */
1746 TF_CHECK_PARMS_SESSION(tfp, parms);
1748 if (parms->type == TF_TBL_TYPE_EXT) {
1749 /* Not supported, yet */
1751 "%s, External table type not supported\n",
1752 tf_dir_2_str(parms->dir));
1756 /* Internal table type processing */
1757 rc = tf_get_bulk_tbl_entry_internal(tfp, parms);
1760 "%s, Bulk get failed, type:%d, rc:%s\n",
1761 tf_dir_2_str(parms->dir),
1769 /* API defined in tf_core.h */
/* Public table-scope alloc API: parameter check, then delegate to the
 * EEM implementation in tf_alloc_eem_tbl_scope().
 */
1771 tf_alloc_tbl_scope(struct tf *tfp,
1772 struct tf_alloc_tbl_scope_parms *parms)
1776 /* check parameters */
1777 if (parms == NULL || tfp == NULL) {
1778 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1782 rc = tf_alloc_eem_tbl_scope(tfp, parms);
1787 /* API defined in tf_core.h */
/* Public table-scope free API: parameter check, then delegate to
 * tf_free_eem_tbl_scope_cb() which releases all scope resources.
 */
1789 tf_free_tbl_scope(struct tf *tfp,
1790 struct tf_free_tbl_scope_parms *parms)
1794 /* check parameters */
1795 if (parms == NULL || tfp == NULL) {
1796 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1800 /* free table scope and all associated resources */
1801 rc = tf_free_eem_tbl_scope_cb(tfp, parms);
1806 /* API defined in tf_core.h */
/* Public entry alloc API.  External types go straight to the external
 * pool.  Otherwise, when shadow-copy support is compiled in and
 * requested, the shadow DB is searched first; on miss (or without
 * shadow) the internal session pool is used.
 */
1808 tf_alloc_tbl_entry(struct tf *tfp,
1809 struct tf_alloc_tbl_entry_parms *parms)
1812 #if (TF_SHADOW == 1)
1813 struct tf_session *tfs;
1814 #endif /* TF_SHADOW */
1816 /* Check parameters */
1817 if (parms == NULL || tfp == NULL) {
1818 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1822 * No shadow copy support for external tables, allocate and return
1824 if (parms->type == TF_TBL_TYPE_EXT) {
1825 rc = tf_alloc_tbl_entry_pool_external(tfp, parms);
1829 #if (TF_SHADOW == 1)
1830 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1832 "dir:%d, Session info invalid\n",
1837 tfs = (struct tf_session *)(tfp->session->core_data);
1839 /* Search the Shadow DB for requested element. If not found go
1840 * allocate one from the Session Pool
1842 if (parms->search_enable && tfs->shadow_copy) {
1843 rc = tf_alloc_tbl_entry_shadow(tfs, parms);
1844 /* Entry found and parms populated with return data */
1848 #endif /* TF_SHADOW */
1850 rc = tf_alloc_tbl_entry_pool_internal(tfp, parms);
1852 PMD_DRV_LOG(ERR, "dir%d, Alloc failed, rc:%d\n",
1859 /* API defined in tf_core.h */
/* Public entry free API, mirroring tf_alloc_tbl_entry(): external
 * types go to the external pool; with shadow support the shadow DB is
 * consulted first; otherwise the internal session pool is used.
 */
1861 tf_free_tbl_entry(struct tf *tfp,
1862 struct tf_free_tbl_entry_parms *parms)
1865 #if (TF_SHADOW == 1)
1866 struct tf_session *tfs;
1867 #endif /* TF_SHADOW */
1869 /* Check parameters */
1870 if (parms == NULL || tfp == NULL) {
1871 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1875 * No shadow of external tables so just free the entry
1877 if (parms->type == TF_TBL_TYPE_EXT) {
1878 rc = tf_free_tbl_entry_pool_external(tfp, parms);
1882 #if (TF_SHADOW == 1)
1883 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1885 "dir:%d, Session info invalid\n",
1890 tfs = (struct tf_session *)(tfp->session->core_data);
1892 /* Search the Shadow DB for requested element. If not found go
1893 * allocate one from the Session Pool
1895 if (parms->search_enable && tfs->shadow_copy) {
1896 rc = tf_free_tbl_entry_shadow(tfs, parms);
1897 /* Entry free'ed and parms populated with return data */
1901 #endif /* TF_SHADOW */
1903 rc = tf_free_tbl_entry_pool_internal(tfp, parms);
/* NOTE(review): this is the free path but the message says
 * "Alloc failed" — copy/paste from tf_alloc_tbl_entry(); should read
 * "Free failed".
 */
1906 PMD_DRV_LOG(ERR, "dir:%d, Alloc failed, rc:%d\n",
/* Debug helper: print every PTE of one page-table level, following the
 * same traversal order tf_em_link_page_table() used.  Stops when a
 * LAST-tagged PTE is seen; flags PTEs missing the VALID bit and any
 * walk that overruns the next level's page count.
 */
1914 tf_dump_link_page_table(struct tf_em_page_tbl *tp,
1915 struct tf_em_page_tbl *tp_next)
1922 printf("pg_count:%d pg_size:0x%x\n",
1925 for (i = 0; i < tp->pg_count; i++) {
1926 pg_va = tp->pg_va_tbl[i];
1927 printf("\t%p\n", (void *)pg_va);
1928 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
1929 printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
/* Low 3 bits carry the PTU flags; LAST terminates the dump. */
1930 if (((pg_va[j] & 0x7) ==
1931 tfp_cpu_to_le_64(PTU_PTE_LAST |
1935 if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
1936 printf("** Invalid entry **\n");
1940 if (++k >= tp_next->pg_count) {
1941 printf("** Shouldn't get here **\n");
1948 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
1950 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
1952 struct tf_session *session;
1953 struct tf_tbl_scope_cb *tbl_scope_cb;
1954 struct tf_em_page_tbl *tp;
1955 struct tf_em_page_tbl *tp_next;
1956 struct tf_em_table *tbl;
1961 printf("called %s\n", __func__);
1963 /* find session struct */
1964 session = (struct tf_session *)tfp->session->core_data;
1966 /* find control block for table scope */
1967 tbl_scope_cb = tbl_scope_cb_find(session,
1969 if (tbl_scope_cb == NULL)
1970 TFP_DRV_LOG(ERR, "No table scope\n");
1972 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1973 printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
1975 for (j = KEY0_TABLE; j < MAX_TABLE; j++) {
1976 tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
1978 ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
1984 if (tbl->pg_tbl[0].pg_va_tbl &&
1985 tbl->pg_tbl[0].pg_pa_tbl)
1987 tbl->pg_tbl[0].pg_va_tbl[0],
1988 (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
1989 for (i = 0; i < tbl->num_lvl - 1; i++) {
1990 printf("Level:%d\n", i);
1991 tp = &tbl->pg_tbl[i];
1992 tp_next = &tbl->pg_tbl[i + 1];
1993 tf_dump_link_page_table(tp, tp_next);