1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
6 /* Truflow Table APIs and supporting code */
12 #include <sys/param.h>
13 #include <rte_common.h>
14 #include <rte_errno.h>
15 #include "hsi_struct_def_dpdk.h"
24 #include "tf_resources.h"
/* Page-table entry (PTE) control bits used when linking EM page-table
 * levels together for the PTU hardware walker.
 */
#define PTU_PTE_VALID          0x1UL
#define PTU_PTE_LAST           0x2UL
#define PTU_PTE_NEXT_TO_LAST   0x4UL

/* Number of pointers per page_size */
#define MAX_PAGE_PTRS(page_size)  ((page_size) / sizeof(void *))

/* Supported EM backing-store page sizes */
#define TF_EM_PG_SZ_4K        (1 << 12)
#define TF_EM_PG_SZ_8K        (1 << 13)
#define TF_EM_PG_SZ_64K       (1 << 16)
#define TF_EM_PG_SZ_256K      (1 << 18)
#define TF_EM_PG_SZ_1M        (1 << 20)
#define TF_EM_PG_SZ_2M        (1 << 21)
#define TF_EM_PG_SZ_4M        (1 << 22)
#define TF_EM_PG_SZ_1G        (1 << 30)

/* Sentinel: no EM context registered with firmware */
#define TF_EM_CTX_ID_INVALID   0xFFFF

/* EM table entry-count limits; counts are powers of 2 in this range */
#define TF_EM_MIN_ENTRIES     (1 << 15) /* 32K */
#define TF_EM_MAX_ENTRIES     (1 << 27) /* 128M */
49 * Function to free a page table
52 * Pointer to the page table to free
55 tf_em_free_pg_tbl(struct tf_em_page_tbl *tp)
59 for (i = 0; i < tp->pg_count; i++) {
60 if (!tp->pg_va_tbl[i]) {
62 "No map for page %d table %016" PRIu64 "\n",
64 (uint64_t)(uintptr_t)tp);
68 tfp_free(tp->pg_va_tbl[i]);
69 tp->pg_va_tbl[i] = NULL;
73 tfp_free(tp->pg_va_tbl);
75 tfp_free(tp->pg_pa_tbl);
80 * Function to free an EM table
83 * Pointer to the EM table to free
86 tf_em_free_page_table(struct tf_em_table *tbl)
88 struct tf_em_page_tbl *tp;
91 for (i = 0; i < tbl->num_lvl; i++) {
95 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
100 tf_em_free_pg_tbl(tp);
104 tbl->l0_dma_addr = 0;
106 tbl->num_data_pages = 0;
110 * Allocation of page tables
113 * Pointer to a TruFlow handle
116 * Page count to allocate
123 * -ENOMEM - Out of memory
126 tf_em_alloc_pg_tbl(struct tf_em_page_tbl *tp,
131 struct tfp_calloc_parms parms;
133 parms.nitems = pg_count;
134 parms.size = sizeof(void *);
137 if (tfp_calloc(&parms) != 0)
140 tp->pg_va_tbl = parms.mem_va;
142 if (tfp_calloc(&parms) != 0) {
143 tfp_free(tp->pg_va_tbl);
147 tp->pg_pa_tbl = parms.mem_va;
150 tp->pg_size = pg_size;
152 for (i = 0; i < pg_count; i++) {
154 parms.size = pg_size;
155 parms.alignment = TF_EM_PAGE_ALIGNMENT;
157 if (tfp_calloc(&parms) != 0)
160 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
161 tp->pg_va_tbl[i] = parms.mem_va;
163 memset(tp->pg_va_tbl[i], 0, pg_size);
170 tf_em_free_pg_tbl(tp);
175 * Allocates EM page tables
178 * Table to allocate pages for
182 * -ENOMEM - Out of memory
185 tf_em_alloc_page_table(struct tf_em_table *tbl)
187 struct tf_em_page_tbl *tp;
192 for (i = 0; i < tbl->num_lvl; i++) {
193 tp = &tbl->pg_tbl[i];
195 rc = tf_em_alloc_pg_tbl(tp,
200 "Failed to allocate page table: lvl: %d\n",
205 for (j = 0; j < tp->pg_count; j++) {
207 "EEM: Allocated page table: size %u lvl %d cnt"
212 (uint32_t *)tp->pg_va_tbl[j],
213 (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
219 tf_em_free_page_table(tbl);
224 * Links EM page tables
227 * Pointer to page table
230 * Pointer to the next page table
233 * Flag controlling if the page table is last
236 tf_em_link_page_table(struct tf_em_page_tbl *tp,
237 struct tf_em_page_tbl *tp_next,
240 uint64_t *pg_pa = tp_next->pg_pa_tbl;
247 for (i = 0; i < tp->pg_count; i++) {
248 pg_va = tp->pg_va_tbl[i];
250 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
251 if (k == tp_next->pg_count - 2 && set_pte_last)
252 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
253 else if (k == tp_next->pg_count - 1 && set_pte_last)
254 valid = PTU_PTE_LAST | PTU_PTE_VALID;
256 valid = PTU_PTE_VALID;
258 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
259 if (++k >= tp_next->pg_count)
266 * Setup a EM page table
269 * Pointer to EM page table
272 tf_em_setup_page_table(struct tf_em_table *tbl)
274 struct tf_em_page_tbl *tp_next;
275 struct tf_em_page_tbl *tp;
276 bool set_pte_last = 0;
279 for (i = 0; i < tbl->num_lvl - 1; i++) {
280 tp = &tbl->pg_tbl[i];
281 tp_next = &tbl->pg_tbl[i + 1];
282 if (i == tbl->num_lvl - 2)
284 tf_em_link_page_table(tp, tp_next, set_pte_last);
287 tbl->l0_addr = tbl->pg_tbl[PT_LVL_0].pg_va_tbl[0];
288 tbl->l0_dma_addr = tbl->pg_tbl[PT_LVL_0].pg_pa_tbl[0];
292 * Given the page size, size of each data item (entry size),
293 * and the total number of entries needed, determine the number
294 * of page table levels and the number of data pages required.
303 * Number of entries needed
305 * [out] num_data_pages
306 * Number of pages required
309 * Success - Number of EM page levels required
310 * -ENOMEM - Out of memory
313 tf_em_size_page_tbl_lvl(uint32_t page_size,
315 uint32_t num_entries,
316 uint64_t *num_data_pages)
318 uint64_t lvl_data_size = page_size;
323 data_size = (uint64_t)num_entries * entry_size;
325 while (lvl_data_size < data_size) {
329 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
331 else if (lvl == PT_LVL_2)
332 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
333 MAX_PAGE_PTRS(page_size) * page_size;
338 *num_data_pages = roundup(data_size, page_size) / page_size;
/**
 * Return the number of page table pages needed to
 * reference the given number of next level pages.
 *
 * [in] num_pages
 *   Number of next-level pages to reference
 * [in] page_size
 *   Size of each EM page
 *
 * Returns
 *   Number of EM page table pages
 */
static uint32_t
tf_em_page_tbl_pgcnt(uint32_t num_pages,
		     uint32_t page_size)
{
	/* Each page holds MAX_PAGE_PTRS(page_size) pointers; round up */
	return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
		      MAX_PAGE_PTRS(page_size);
}
366 * Given the number of data pages, page_size and the maximum
367 * number of page table levels (already determined), size
368 * the number of page table pages required at each level.
371 * Max number of levels
373 * [in] num_data_pages
374 * Number of EM data pages
383 tf_em_size_page_tbls(int max_lvl,
384 uint64_t num_data_pages,
388 if (max_lvl == PT_LVL_0) {
389 page_cnt[PT_LVL_0] = num_data_pages;
390 } else if (max_lvl == PT_LVL_1) {
391 page_cnt[PT_LVL_1] = num_data_pages;
393 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
394 } else if (max_lvl == PT_LVL_2) {
395 page_cnt[PT_LVL_2] = num_data_pages;
397 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_2], page_size);
399 tf_em_page_tbl_pgcnt(page_cnt[PT_LVL_1], page_size);
406 * Size the EM table based on capabilities
413 * - EINVAL - Parameter error
414 * - ENOMEM - Out of memory
417 tf_em_size_table(struct tf_em_table *tbl)
419 uint64_t num_data_pages;
422 uint32_t num_entries;
423 uint32_t cnt = TF_EM_MIN_ENTRIES;
425 /* Ignore entry if both size and number are zero */
426 if (!tbl->entry_size && !tbl->num_entries)
429 /* If only one is set then error */
430 if (!tbl->entry_size || !tbl->num_entries)
433 /* Determine number of page table levels and the number
434 * of data pages needed to process the given eem table.
436 if (tbl->type == RECORD_TABLE) {
438 * For action records just a memory size is provided. Work
439 * backwards to resolve to number of entries
441 num_entries = tbl->num_entries / tbl->entry_size;
442 if (num_entries < TF_EM_MIN_ENTRIES) {
443 num_entries = TF_EM_MIN_ENTRIES;
445 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
450 num_entries = tbl->num_entries;
453 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
458 PMD_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
460 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
462 (uint64_t)num_entries * tbl->entry_size,
467 tbl->num_lvl = max_lvl + 1;
468 tbl->num_data_pages = num_data_pages;
470 /* Determine the number of pages needed at each level */
471 page_cnt = tbl->page_cnt;
472 memset(page_cnt, 0, sizeof(tbl->page_cnt));
473 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
476 PMD_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
478 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
480 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
490 * Unregisters EM Ctx in Firmware
493 * Pointer to a TruFlow handle
496 * Pointer to a table scope control block
499 * Receive or transmit direction
502 tf_em_ctx_unreg(struct tf *tfp,
503 struct tf_tbl_scope_cb *tbl_scope_cb,
506 struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
507 struct tf_em_table *tbl;
510 for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
511 tbl = &ctxp->em_tables[i];
513 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
514 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
515 tf_em_free_page_table(tbl);
521 * Registers EM Ctx in Firmware
524 * Pointer to a TruFlow handle
527 * Pointer to a table scope control block
530 * Receive or transmit direction
534 * -ENOMEM - Out of Memory
537 tf_em_ctx_reg(struct tf *tfp,
538 struct tf_tbl_scope_cb *tbl_scope_cb,
541 struct tf_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
542 struct tf_em_table *tbl;
546 for (i = KEY0_TABLE; i < MAX_TABLE; i++) {
547 tbl = &ctxp->em_tables[i];
549 if (tbl->num_entries && tbl->entry_size) {
550 rc = tf_em_size_table(tbl);
555 rc = tf_em_alloc_page_table(tbl);
559 tf_em_setup_page_table(tbl);
560 rc = tf_msg_em_mem_rgtr(tfp,
562 TF_EM_PAGE_SIZE_ENUM,
572 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
577 * Validates EM number of entries requested
580 * Pointer to table scope control block to be populated
583 * Pointer to input parameters
587 * -EINVAL - Parameter error
590 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
591 struct tf_alloc_tbl_scope_parms *parms)
595 if (parms->rx_mem_size_in_mb != 0) {
596 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
597 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
599 uint32_t num_entries = (parms->rx_mem_size_in_mb *
600 TF_MEGABYTE) / (key_b + action_b);
602 if (num_entries < TF_EM_MIN_ENTRIES) {
603 PMD_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
605 parms->rx_mem_size_in_mb);
609 cnt = TF_EM_MIN_ENTRIES;
610 while (num_entries > cnt &&
611 cnt <= TF_EM_MAX_ENTRIES)
614 if (cnt > TF_EM_MAX_ENTRIES) {
615 PMD_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
617 (parms->tx_num_flows_in_k * TF_KILOBYTE));
621 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
623 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
625 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
626 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
628 "EEM: Invalid number of Rx flows "
629 "requested:%u max:%u\n",
630 parms->rx_num_flows_in_k * TF_KILOBYTE,
631 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
635 /* must be a power-of-2 supported value
636 * in the range 32K - 128M
638 cnt = TF_EM_MIN_ENTRIES;
639 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
640 cnt <= TF_EM_MAX_ENTRIES)
643 if (cnt > TF_EM_MAX_ENTRIES) {
645 "EEM: Invalid number of Rx requested: %u\n",
646 (parms->rx_num_flows_in_k * TF_KILOBYTE));
651 if (parms->tx_mem_size_in_mb != 0) {
652 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
653 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
655 uint32_t num_entries = (parms->tx_mem_size_in_mb *
656 (TF_KILOBYTE * TF_KILOBYTE)) /
659 if (num_entries < TF_EM_MIN_ENTRIES) {
661 "EEM: Insufficient memory requested:%uMB\n",
662 parms->rx_mem_size_in_mb);
666 cnt = TF_EM_MIN_ENTRIES;
667 while (num_entries > cnt &&
668 cnt <= TF_EM_MAX_ENTRIES)
671 if (cnt > TF_EM_MAX_ENTRIES) {
673 "EEM: Invalid number of Tx requested: %u\n",
674 (parms->tx_num_flows_in_k * TF_KILOBYTE));
678 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
680 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
682 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
683 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
685 "EEM: Invalid number of Tx flows "
686 "requested:%u max:%u\n",
687 (parms->tx_num_flows_in_k * TF_KILOBYTE),
688 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
692 cnt = TF_EM_MIN_ENTRIES;
693 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
694 cnt <= TF_EM_MAX_ENTRIES)
697 if (cnt > TF_EM_MAX_ENTRIES) {
699 "EEM: Invalid number of Tx requested: %u\n",
700 (parms->tx_num_flows_in_k * TF_KILOBYTE));
705 if (parms->rx_num_flows_in_k != 0 &&
706 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
708 "EEM: Rx key size required: %u\n",
709 (parms->rx_max_key_sz_in_bits));
713 if (parms->tx_num_flows_in_k != 0 &&
714 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
716 "EEM: Tx key size required: %u\n",
717 (parms->tx_max_key_sz_in_bits));
721 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].num_entries =
722 parms->rx_num_flows_in_k * TF_KILOBYTE;
723 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY0_TABLE].entry_size =
724 parms->rx_max_key_sz_in_bits / 8;
726 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].num_entries =
727 parms->rx_num_flows_in_k * TF_KILOBYTE;
728 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[KEY1_TABLE].entry_size =
729 parms->rx_max_key_sz_in_bits / 8;
731 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].num_entries =
732 parms->rx_num_flows_in_k * TF_KILOBYTE;
733 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[RECORD_TABLE].entry_size =
734 parms->rx_max_action_entry_sz_in_bits / 8;
736 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[EFC_TABLE].num_entries =
740 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].num_entries =
741 parms->tx_num_flows_in_k * TF_KILOBYTE;
742 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY0_TABLE].entry_size =
743 parms->tx_max_key_sz_in_bits / 8;
745 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].num_entries =
746 parms->tx_num_flows_in_k * TF_KILOBYTE;
747 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[KEY1_TABLE].entry_size =
748 parms->tx_max_key_sz_in_bits / 8;
750 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].num_entries =
751 parms->tx_num_flows_in_k * TF_KILOBYTE;
752 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[RECORD_TABLE].entry_size =
753 parms->tx_max_action_entry_sz_in_bits / 8;
755 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[EFC_TABLE].num_entries =
762 * Internal function to set a Table Entry. Supports all internal Table Types
765 * Pointer to TruFlow handle
768 * Pointer to input parameters
772 * -EINVAL - Parameter error
775 tf_set_tbl_entry_internal(struct tf *tfp,
776 struct tf_set_tbl_entry_parms *parms)
781 struct bitalloc *session_pool;
782 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
784 /* Lookup the pool using the table type of the element */
785 rc = tf_rm_lookup_tbl_type_pool(tfs,
789 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
795 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
796 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
797 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
799 "dir:%d, Type not supported, type:%d\n",
805 /* Adjust the returned index/offset as there is no guarantee
806 * that the start is 0 at time of RM allocation
808 tf_rm_convert_index(tfs,
811 TF_RM_CONVERT_RM_BASE,
815 /* Verify that the entry has been previously allocated */
816 id = ba_inuse(session_pool, index);
819 "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
827 rc = tf_msg_set_tbl_entry(tfp,
830 parms->data_sz_in_bytes,
835 "dir:%d, Set failed, type:%d, rc:%d\n",
845 * Internal function to get a Table Entry. Supports all Table Types
846 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
849 * Pointer to TruFlow handle
852 * Pointer to input parameters
856 * -EINVAL - Parameter error
859 tf_get_tbl_entry_internal(struct tf *tfp,
860 struct tf_get_tbl_entry_parms *parms)
865 struct bitalloc *session_pool;
866 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
868 /* Lookup the pool using the table type of the element */
869 rc = tf_rm_lookup_tbl_type_pool(tfs,
873 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
879 /* Adjust the returned index/offset as there is no guarantee
880 * that the start is 0 at time of RM allocation
882 tf_rm_convert_index(tfs,
885 TF_RM_CONVERT_RM_BASE,
889 /* Verify that the entry has been previously allocated */
890 id = ba_inuse(session_pool, index);
893 "dir:%d, Invalid or not allocated index, type:%d, idx:%d\n",
901 rc = tf_msg_get_tbl_entry(tfp,
904 parms->data_sz_in_bytes,
909 "dir:%d, Get failed, type:%d, rc:%d\n",
920 * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
921 * the requested entry. If found the ref count is incremente and
927 * Allocation parameters
930 * 0 - Success, entry found and ref count incremented
931 * -ENOENT - Failure, entry not found
934 tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
935 struct tf_alloc_tbl_entry_parms *parms __rte_unused)
938 "dir:%d, Entry Alloc with search not supported\n",
946 * Free Tbl entry from the Shadow DB. Shadow DB is searched for
947 * the requested entry. If found the ref count is decremente and
948 * new ref_count returned.
953 * Allocation parameters
956 * 0 - Success, entry found and ref count decremented
957 * -ENOENT - Failure, entry not found
960 tf_free_tbl_entry_shadow(struct tf_session *tfs,
961 struct tf_free_tbl_entry_parms *parms)
964 "dir:%d, Entry Free with search not supported\n",
969 #endif /* TF_SHADOW */
972 * Create External Tbl pool of memory indexes.
977 * pointer to the table scope
979 * number of entries to write
980 * [in] entry_sz_bytes
984 * 0 - Success, entry allocated - no search support
985 * -ENOMEM -EINVAL -EOPNOTSUPP
986 * - Failure, entry not allocated, out of resources
989 tf_create_tbl_pool_external(enum tf_dir dir,
990 struct tf_tbl_scope_cb *tbl_scope_cb,
991 uint32_t num_entries,
992 uint32_t entry_sz_bytes)
994 struct tfp_calloc_parms parms;
998 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
1000 parms.nitems = num_entries;
1001 parms.size = sizeof(uint32_t);
1002 parms.alignment = 0;
1004 if (tfp_calloc(&parms) != 0) {
1005 PMD_DRV_LOG(ERR, "%d: TBL: external pool failure %s\n",
1006 dir, strerror(-ENOMEM));
1010 /* Create empty stack
1012 rc = stack_init(num_entries, parms.mem_va, pool);
1015 PMD_DRV_LOG(ERR, "%d: TBL: stack init failure %s\n",
1016 dir, strerror(-rc));
1020 /* Save the malloced memory address so that it can
1021 * be freed when the table scope is freed.
1023 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
1025 /* Fill pool with indexes in reverse
1027 j = (num_entries - 1) * entry_sz_bytes;
1029 for (i = 0; i < num_entries; i++) {
1030 rc = stack_push(pool, j);
1032 PMD_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
1033 tf_dir_2_str(dir), strerror(-rc));
1038 PMD_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
1042 j -= entry_sz_bytes;
1045 if (!stack_is_full(pool)) {
1047 PMD_DRV_LOG(ERR, "%d TBL: stack failure %s\n",
1048 dir, strerror(-rc));
1053 tfp_free((void *)parms.mem_va);
1058 * Destroy External Tbl pool of memory indexes.
1063 * pointer to the table scope
1067 tf_destroy_tbl_pool_external(enum tf_dir dir,
1068 struct tf_tbl_scope_cb *tbl_scope_cb)
1070 uint32_t *ext_act_pool_mem =
1071 tbl_scope_cb->ext_act_pool_mem[dir];
1073 tfp_free(ext_act_pool_mem);
1077 * Allocate External Tbl entry from the Session Pool.
1080 * Pointer to Truflow Handle
1082 * Allocation parameters
1085 * 0 - Success, entry allocated - no search support
1086 * -ENOMEM -EINVAL -EOPNOTSUPP
1087 * - Failure, entry not allocated, out of resources
1090 tf_alloc_tbl_entry_pool_external(struct tf *tfp,
1091 struct tf_alloc_tbl_entry_parms *parms)
1095 struct tf_session *tfs;
1096 struct tf_tbl_scope_cb *tbl_scope_cb;
1099 /* Check parameters */
1100 if (tfp == NULL || parms == NULL) {
1101 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1105 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1107 "dir:%d, Session info invalid\n",
1112 tfs = (struct tf_session *)(tfp->session->core_data);
1114 /* Get the pool info from the table scope
1116 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1118 if (tbl_scope_cb == NULL) {
1120 "%s, table scope not allocated\n",
1121 tf_dir_2_str(parms->dir));
1124 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
1126 /* Allocate an element
1128 rc = stack_pop(pool, &index);
1132 "dir:%d, Allocation failed, type:%d\n",
1142 * Allocate Internal Tbl entry from the Session Pool.
1145 * Pointer to Truflow Handle
1147 * Allocation parameters
1150 * 0 - Success, entry found and ref count decremented
1151 * -ENOMEM - Failure, entry not allocated, out of resources
1154 tf_alloc_tbl_entry_pool_internal(struct tf *tfp,
1155 struct tf_alloc_tbl_entry_parms *parms)
1161 struct bitalloc *session_pool;
1162 struct tf_session *tfs;
1164 /* Check parameters */
1165 if (tfp == NULL || parms == NULL) {
1166 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1170 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1172 "dir:%d, Session info invalid\n",
1177 tfs = (struct tf_session *)(tfp->session->core_data);
1179 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1180 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1181 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1182 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1183 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1184 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1185 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1187 "dir:%d, Type not supported, type:%d\n",
1193 /* Lookup the pool using the table type of the element */
1194 rc = tf_rm_lookup_tbl_type_pool(tfs,
1198 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1202 id = ba_alloc(session_pool);
1204 free_cnt = ba_free_count(session_pool);
1207 "dir:%d, Allocation failed, type:%d, free:%d\n",
1214 /* Adjust the returned index/offset as there is no guarantee
1215 * that the start is 0 at time of RM allocation
1217 tf_rm_convert_index(tfs,
1220 TF_RM_CONVERT_ADD_BASE,
1228 * Free External Tbl entry to the session pool.
1231 * Pointer to Truflow Handle
1233 * Allocation parameters
1236 * 0 - Success, entry freed
1238 * - Failure, entry not successfully freed for these reasons
1244 tf_free_tbl_entry_pool_external(struct tf *tfp,
1245 struct tf_free_tbl_entry_parms *parms)
1248 struct tf_session *tfs;
1250 struct tf_tbl_scope_cb *tbl_scope_cb;
1253 /* Check parameters */
1254 if (tfp == NULL || parms == NULL) {
1255 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1259 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1261 "dir:%d, Session info invalid\n",
1266 tfs = (struct tf_session *)(tfp->session->core_data);
1268 /* Get the pool info from the table scope
1270 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1272 if (tbl_scope_cb == NULL) {
1274 "dir:%d, Session info invalid\n",
1278 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
1282 rc = stack_push(pool, index);
1286 "dir:%d, consistency error, stack full, type:%d, idx:%d\n",
1295 * Free Internal Tbl entry from the Session Pool.
1298 * Pointer to Truflow Handle
1300 * Allocation parameters
1303 * 0 - Success, entry found and ref count decremented
1304 * -ENOMEM - Failure, entry not allocated, out of resources
1307 tf_free_tbl_entry_pool_internal(struct tf *tfp,
1308 struct tf_free_tbl_entry_parms *parms)
1312 struct bitalloc *session_pool;
1313 struct tf_session *tfs;
1316 /* Check parameters */
1317 if (tfp == NULL || parms == NULL) {
1318 PMD_DRV_LOG(ERR, "Invalid parameters\n");
1322 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1324 "dir:%d, Session info invalid\n",
1329 tfs = (struct tf_session *)(tfp->session->core_data);
1331 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1332 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1333 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1334 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1335 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1336 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1337 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1339 "dir:%d, Type not supported, type:%d\n",
1345 /* Lookup the pool using the table type of the element */
1346 rc = tf_rm_lookup_tbl_type_pool(tfs,
1350 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1356 /* Adjust the returned index/offset as there is no guarantee
1357 * that the start is 0 at time of RM allocation
1359 tf_rm_convert_index(tfs,
1362 TF_RM_CONVERT_RM_BASE,
1366 /* Check if element was indeed allocated */
1367 id = ba_inuse_free(session_pool, index);
1370 "dir:%d, Element not previously alloc'ed, type:%d, idx:%d\n",
1380 /* API defined in tf_em.h */
1381 struct tf_tbl_scope_cb *
1382 tbl_scope_cb_find(struct tf_session *session,
1383 uint32_t tbl_scope_id)
1387 /* Check that id is valid */
1388 i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
1392 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
1393 if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
1394 return &session->tbl_scopes[i];
1400 /* API defined in tf_core.h */
1402 tf_free_eem_tbl_scope_cb(struct tf *tfp,
1403 struct tf_free_tbl_scope_parms *parms)
1407 struct tf_tbl_scope_cb *tbl_scope_cb;
1408 struct tf_session *session;
1410 session = (struct tf_session *)(tfp->session->core_data);
1412 tbl_scope_cb = tbl_scope_cb_find(session,
1413 parms->tbl_scope_id);
1415 if (tbl_scope_cb == NULL)
1418 /* Free Table control block */
1419 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1421 /* free table scope locks */
1422 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1423 /* Free associated external pools
1425 tf_destroy_tbl_pool_external(dir,
1429 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1431 /* free table scope and all associated resources */
1432 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
1438 /* API defined in tf_em.h */
1440 tf_alloc_eem_tbl_scope(struct tf *tfp,
1441 struct tf_alloc_tbl_scope_parms *parms)
1445 struct tf_tbl_scope_cb *tbl_scope_cb;
1446 struct tf_em_table *em_tables;
1448 struct tf_session *session;
1449 struct tf_free_tbl_scope_parms free_parms;
1451 /* check parameters */
1452 if (parms == NULL || tfp->session == NULL) {
1453 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1457 session = (struct tf_session *)tfp->session->core_data;
1459 /* Get Table Scope control block from the session pool */
1460 index = ba_alloc(session->tbl_scope_pool_rx);
1462 PMD_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
1467 tbl_scope_cb = &session->tbl_scopes[index];
1468 tbl_scope_cb->index = index;
1469 tbl_scope_cb->tbl_scope_id = index;
1470 parms->tbl_scope_id = index;
1472 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1473 rc = tf_msg_em_qcaps(tfp,
1475 &tbl_scope_cb->em_caps[dir]);
1478 "EEM: Unable to query for EEM capability\n");
1484 * Validate and setup table sizes
1486 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1489 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1491 * Allocate tables and signal configuration to FW
1493 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1496 "EEM: Unable to register for EEM ctx\n");
1500 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
1501 rc = tf_msg_em_cfg(tfp,
1502 em_tables[KEY0_TABLE].num_entries,
1503 em_tables[KEY0_TABLE].ctx_id,
1504 em_tables[KEY1_TABLE].ctx_id,
1505 em_tables[RECORD_TABLE].ctx_id,
1506 em_tables[EFC_TABLE].ctx_id,
1507 parms->hw_flow_cache_flush_timer,
1511 "TBL: Unable to configure EEM in firmware\n");
1515 rc = tf_msg_em_op(tfp,
1517 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1521 "EEM: Unable to enable EEM in firmware\n");
1525 /* Allocate the pool of offsets of the external memory.
1526 * Initially, this is a single fixed size pool for all external
1527 * actions related to a single table scope.
1529 rc = tf_create_tbl_pool_external(dir,
1531 em_tables[RECORD_TABLE].num_entries,
1532 em_tables[RECORD_TABLE].entry_size);
1535 "%d TBL: Unable to allocate idx pools %s\n",
1545 free_parms.tbl_scope_id = index;
1546 tf_free_eem_tbl_scope_cb(tfp, &free_parms);
1550 /* Free Table control block */
1551 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1555 /* API defined in tf_core.h */
1557 tf_set_tbl_entry(struct tf *tfp,
1558 struct tf_set_tbl_entry_parms *parms)
1561 struct tf_tbl_scope_cb *tbl_scope_cb;
1562 struct tf_session *session;
1564 if (tfp == NULL || parms == NULL || parms->data == NULL)
1567 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1569 "dir:%d, Session info invalid\n",
1574 if (parms->type == TF_TBL_TYPE_EXT) {
1576 uint32_t offset = parms->idx;
1577 uint32_t tbl_scope_id;
1579 session = (struct tf_session *)(tfp->session->core_data);
1581 tbl_scope_id = parms->tbl_scope_id;
1583 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1585 "dir:%d, Table scope not allocated\n",
1590 /* Get the table scope control block associated with the
1593 tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);
1595 if (tbl_scope_cb == NULL)
1598 /* External table, implicitly the Action table */
1599 base_addr = tf_em_get_table_page(tbl_scope_cb,
1603 if (base_addr == NULL) {
1605 "dir:%d, Base address lookup failed\n",
1610 offset %= TF_EM_PAGE_SIZE;
1611 rte_memcpy((char *)base_addr + offset,
1613 parms->data_sz_in_bytes);
1615 /* Internal table type processing */
1616 rc = tf_set_tbl_entry_internal(tfp, parms);
1619 "dir:%d, Set failed, type:%d, rc:%d\n",
1629 /* API defined in tf_core.h */
1631 tf_get_tbl_entry(struct tf *tfp,
1632 struct tf_get_tbl_entry_parms *parms)
1636 if (tfp == NULL || parms == NULL)
1639 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1641 "dir:%d, Session info invalid\n",
1646 if (parms->type == TF_TBL_TYPE_EXT) {
1648 "dir:%d, External table type not supported\n",
1653 /* Internal table type processing */
1654 rc = tf_get_tbl_entry_internal(tfp, parms);
1657 "dir:%d, Get failed, type:%d, rc:%d\n",
1666 /* API defined in tf_core.h */
1668 tf_alloc_tbl_scope(struct tf *tfp,
1669 struct tf_alloc_tbl_scope_parms *parms)
1673 /* check parameters */
1674 if (parms == NULL || tfp == NULL) {
1675 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1679 rc = tf_alloc_eem_tbl_scope(tfp, parms);
1684 /* API defined in tf_core.h */
1686 tf_free_tbl_scope(struct tf *tfp,
1687 struct tf_free_tbl_scope_parms *parms)
1691 /* check parameters */
1692 if (parms == NULL || tfp == NULL) {
1693 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1697 /* free table scope and all associated resources */
1698 rc = tf_free_eem_tbl_scope_cb(tfp, parms);
1703 /* API defined in tf_core.h */
1705 tf_alloc_tbl_entry(struct tf *tfp,
1706 struct tf_alloc_tbl_entry_parms *parms)
1709 #if (TF_SHADOW == 1)
1710 struct tf_session *tfs;
1711 #endif /* TF_SHADOW */
1713 /* Check parameters */
1714 if (parms == NULL || tfp == NULL) {
1715 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1719 * No shadow copy support for external tables, allocate and return
1721 if (parms->type == TF_TBL_TYPE_EXT) {
1722 rc = tf_alloc_tbl_entry_pool_external(tfp, parms);
1726 #if (TF_SHADOW == 1)
1727 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1729 "dir:%d, Session info invalid\n",
1734 tfs = (struct tf_session *)(tfp->session->core_data);
1736 /* Search the Shadow DB for requested element. If not found go
1737 * allocate one from the Session Pool
1739 if (parms->search_enable && tfs->shadow_copy) {
1740 rc = tf_alloc_tbl_entry_shadow(tfs, parms);
1741 /* Entry found and parms populated with return data */
1745 #endif /* TF_SHADOW */
1747 rc = tf_alloc_tbl_entry_pool_internal(tfp, parms);
1749 PMD_DRV_LOG(ERR, "dir%d, Alloc failed, rc:%d\n",
1756 /* API defined in tf_core.h */
1758 tf_free_tbl_entry(struct tf *tfp,
1759 struct tf_free_tbl_entry_parms *parms)
1762 #if (TF_SHADOW == 1)
1763 struct tf_session *tfs;
1764 #endif /* TF_SHADOW */
1766 /* Check parameters */
1767 if (parms == NULL || tfp == NULL) {
1768 PMD_DRV_LOG(ERR, "TBL: Invalid parameters\n");
1772 * No shadow of external tables so just free the entry
1774 if (parms->type == TF_TBL_TYPE_EXT) {
1775 rc = tf_free_tbl_entry_pool_external(tfp, parms);
1779 #if (TF_SHADOW == 1)
1780 if (tfp->session == NULL || tfp->session->core_data == NULL) {
1782 "dir:%d, Session info invalid\n",
1787 tfs = (struct tf_session *)(tfp->session->core_data);
1789 /* Search the Shadow DB for requested element. If not found go
1790 * allocate one from the Session Pool
1792 if (parms->search_enable && tfs->shadow_copy) {
1793 rc = tf_free_tbl_entry_shadow(tfs, parms);
1794 /* Entry free'ed and parms populated with return data */
1798 #endif /* TF_SHADOW */
1800 rc = tf_free_tbl_entry_pool_internal(tfp, parms);
1803 PMD_DRV_LOG(ERR, "dir:%d, Alloc failed, rc:%d\n",
1811 tf_dump_link_page_table(struct tf_em_page_tbl *tp,
1812 struct tf_em_page_tbl *tp_next)
1819 printf("pg_count:%d pg_size:0x%x\n",
1822 for (i = 0; i < tp->pg_count; i++) {
1823 pg_va = tp->pg_va_tbl[i];
1824 printf("\t%p\n", (void *)pg_va);
1825 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
1826 printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
1827 if (((pg_va[j] & 0x7) ==
1828 tfp_cpu_to_le_64(PTU_PTE_LAST |
1832 if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
1833 printf("** Invalid entry **\n");
1837 if (++k >= tp_next->pg_count) {
1838 printf("** Shouldn't get here **\n");
1845 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
1847 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
1849 struct tf_session *session;
1850 struct tf_tbl_scope_cb *tbl_scope_cb;
1851 struct tf_em_page_tbl *tp;
1852 struct tf_em_page_tbl *tp_next;
1853 struct tf_em_table *tbl;
1858 printf("called %s\n", __func__);
1860 /* find session struct */
1861 session = (struct tf_session *)tfp->session->core_data;
1863 /* find control block for table scope */
1864 tbl_scope_cb = tbl_scope_cb_find(session,
1866 if (tbl_scope_cb == NULL)
1867 PMD_DRV_LOG(ERR, "No table scope\n");
1869 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1870 printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
1872 for (j = KEY0_TABLE; j < MAX_TABLE; j++) {
1873 tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
1875 ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
1881 if (tbl->pg_tbl[0].pg_va_tbl &&
1882 tbl->pg_tbl[0].pg_pa_tbl)
1884 tbl->pg_tbl[0].pg_va_tbl[0],
1885 (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
1886 for (i = 0; i < tbl->num_lvl - 1; i++) {
1887 printf("Level:%d\n", i);
1888 tp = &tbl->pg_tbl[i];
1889 tp_next = &tbl->pg_tbl[i + 1];
1890 tf_dump_link_page_table(tp, tp_next);