1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
6 /* Truflow Table APIs and supporting code */
12 #include <sys/param.h>
13 #include <rte_common.h>
14 #include <rte_errno.h>
15 #include "hsi_struct_def_dpdk.h"
24 #include "tf_resources.h"
27 #include "tf_common.h"
29 #define PTU_PTE_VALID 0x1UL
30 #define PTU_PTE_LAST 0x2UL
31 #define PTU_PTE_NEXT_TO_LAST 0x4UL
33 /* Number of pointers per page_size */
34 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
36 #define TF_EM_PG_SZ_4K (1 << 12)
37 #define TF_EM_PG_SZ_8K (1 << 13)
38 #define TF_EM_PG_SZ_64K (1 << 16)
39 #define TF_EM_PG_SZ_256K (1 << 18)
40 #define TF_EM_PG_SZ_1M (1 << 20)
41 #define TF_EM_PG_SZ_2M (1 << 21)
42 #define TF_EM_PG_SZ_4M (1 << 22)
43 #define TF_EM_PG_SZ_1G (1 << 30)
45 #define TF_EM_CTX_ID_INVALID 0xFFFF
47 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
48 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
51 * Function to free a page table
54 * Pointer to the page table to free
/* Free every data page of a single page-table level, then the VA/PA
 * pointer arrays themselves.  Per-page VA pointers are NULLed after
 * free to guard against a double free on repeated teardown.
 */
57 tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
61 for (i = 0; i < tp->pg_count; i++) {
62 if (!tp->pg_va_tbl[i]) {
/* Page was never mapped (or already freed); log and skip it. */
64 "No mapping for page: %d table: %016" PRIu64 "\n",
66 (uint64_t)(uintptr_t)tp);
70 tfp_free(tp->pg_va_tbl[i]);
71 tp->pg_va_tbl[i] = NULL;
/* Release the pointer arrays that tracked the pages. */
75 tfp_free(tp->pg_va_tbl);
77 tfp_free(tp->pg_pa_tbl);
82 * Function to free an EM table
85 * Pointer to the EM table to free
/* Walk all page-table levels of the EM table, free each level's
 * pages, then reset the table's level-0 bookkeeping so a later setup
 * starts from a clean state.
 */
88 tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
90 struct hcapi_cfa_em_page_tbl *tp;
93 for (i = 0; i < tbl->num_lvl; i++) {
96 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
101 tf_em_free_pg_tbl(tp);
/* Clear cached level-0 DMA address and page count. */
105 tbl->l0_dma_addr = 0;
107 tbl->num_data_pages = 0;
111 * Allocation of page tables
114 * Pointer to a TruFlow handle
117 * Page count to allocate
124 * -ENOMEM - Out of memory
/* Allocate one page-table level: the VA/PA tracking arrays plus
 * pg_count pages of pg_size bytes each.  On any failure everything
 * allocated so far is released before returning.
 */
127 tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
132 struct tfp_calloc_parms parms;
/* First the array of virtual-address pointers ... */
134 parms.nitems = pg_count;
135 parms.size = sizeof(void *);
138 if (tfp_calloc(&parms) != 0)
141 tp->pg_va_tbl = parms.mem_va;
/* ... then the matching array of physical addresses. */
143 if (tfp_calloc(&parms) != 0) {
144 tfp_free(tp->pg_va_tbl);
148 tp->pg_pa_tbl = parms.mem_va;
151 tp->pg_size = pg_size;
/* Allocate each data page, aligned for the EM hardware. */
153 for (i = 0; i < pg_count; i++) {
155 parms.size = pg_size;
156 parms.alignment = TF_EM_PAGE_ALIGNMENT;
158 if (tfp_calloc(&parms) != 0)
161 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
162 tp->pg_va_tbl[i] = parms.mem_va;
/* NOTE(review): this memset is likely redundant if tfp_calloc
 * zero-fills - confirm tfp_calloc semantics before removing.
 */
164 memset(tp->pg_va_tbl[i], 0, pg_size);
/* Error path: unwind any partial allocation of this level. */
171 tf_em_free_pg_tbl(tp);
176 * Allocates EM page tables
179 * Table to allocate pages for
183 * -ENOMEM - Out of memory
/* Allocate every level of the EM table's page tables; on failure of
 * any level the whole table is torn down via tf_em_free_page_table().
 */
186 tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
188 struct hcapi_cfa_em_page_tbl *tp;
193 for (i = 0; i < tbl->num_lvl; i++) {
194 tp = &tbl->pg_tbl[i];
196 rc = tf_em_alloc_pg_tbl(tp,
201 "Failed to allocate page table: lvl: %d, rc:%s\n",
/* Debug trace of each allocated page (VA and PA). */
207 for (j = 0; j < tp->pg_count; j++) {
209 "EEM: Allocated page table: size %u lvl %d cnt"
214 (uint32_t *)tp->pg_va_tbl[j],
215 (uint32_t *)(uintptr_t)tp->pg_pa_tbl[j]);
/* Error path: free all levels allocated so far. */
221 tf_em_free_page_table(tbl);
226 * Links EM page tables
229 * Pointer to page table
232 * Pointer to the next page table
235 * Flag controlling if the page table is last
/* Fill level tp with PTEs pointing at the pages of tp_next.  When
 * set_pte_last is set, the final two next-level pages are tagged
 * NEXT_TO_LAST and LAST respectively so hardware can detect the end
 * of the chain.
 */
238 tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
239 struct hcapi_cfa_em_page_tbl *tp_next,
242 uint64_t *pg_pa = tp_next->pg_pa_tbl;
/* k indexes next-level pages across all pages of this level. */
249 for (i = 0; i < tp->pg_count; i++) {
250 pg_va = tp->pg_va_tbl[i];
252 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
253 if (k == tp_next->pg_count - 2 && set_pte_last)
254 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
255 else if (k == tp_next->pg_count - 1 && set_pte_last)
256 valid = PTU_PTE_LAST | PTU_PTE_VALID;
258 valid = PTU_PTE_VALID;
/* PTEs are stored little-endian: physical address | flag bits. */
260 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
261 if (++k >= tp_next->pg_count)
268 * Setup a EM page table
271 * Pointer to EM page table
/* Chain all page-table levels together and cache the level-0 VA/DMA
 * addresses for registration with firmware.
 */
274 tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
276 struct hcapi_cfa_em_page_tbl *tp_next;
277 struct hcapi_cfa_em_page_tbl *tp;
278 bool set_pte_last = 0;
281 for (i = 0; i < tbl->num_lvl - 1; i++) {
282 tp = &tbl->pg_tbl[i];
283 tp_next = &tbl->pg_tbl[i + 1];
/* Only the link into the final (data) level marks LAST PTEs. */
284 if (i == tbl->num_lvl - 2)
286 tf_em_link_page_table(tp, tp_next, set_pte_last);
289 tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
290 tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
294 * Given the page size, size of each data item (entry size),
295 * and the total number of entries needed, determine the number
296 * of page table levels and the number of data pages required.
305 * Number of entries needed
307 * [out] num_data_pages
308 * Number of pages required
311 * Success - Number of EM page levels required
312 * -ENOMEM - Out of memory
315 tf_em_size_page_tbl_lvl(uint32_t page_size,
317 uint32_t num_entries,
318 uint64_t *num_data_pages)
320 uint64_t lvl_data_size = page_size;
321 int lvl = TF_PT_LVL_0;
/* Total bytes of data the table must hold. */
325 data_size = (uint64_t)num_entries * entry_size;
/* Add levels until one level's reach covers data_size:
 * L1 reach = PTRS * page_size, L2 reach = PTRS^2 * page_size.
 */
327 while (lvl_data_size < data_size) {
330 if (lvl == TF_PT_LVL_1)
331 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
333 else if (lvl == TF_PT_LVL_2)
334 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
335 MAX_PAGE_PTRS(page_size) * page_size;
/* Ceiling division: data pages needed to hold data_size bytes. */
340 *num_data_pages = roundup(data_size, page_size) / page_size;
346 * Return the number of page table pages needed to
347 * reference the given number of next level pages.
353 * Size of each EM page
356 * Number of EM page table pages
/* Ceiling division: pages needed to hold num_pages pointers, given
 * MAX_PAGE_PTRS(page_size) pointers fit per page.
 */
359 tf_em_page_tbl_pgcnt(uint32_t num_pages,
362 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
363 MAX_PAGE_PTRS(page_size);
368 * Given the number of data pages, page_size and the maximum
369 * number of page table levels (already determined), size
370 * the number of page table pages required at each level.
373 * Max number of levels
375 * [in] num_data_pages
376 * Number of EM data pages
/* Work upward from the deepest level: each higher level needs enough
 * pages to hold one pointer per page of the level below it.
 */
385 tf_em_size_page_tbls(int max_lvl,
386 uint64_t num_data_pages,
390 if (max_lvl == TF_PT_LVL_0) {
391 page_cnt[TF_PT_LVL_0] = num_data_pages;
392 } else if (max_lvl == TF_PT_LVL_1) {
393 page_cnt[TF_PT_LVL_1] = num_data_pages;
394 page_cnt[TF_PT_LVL_0] =
395 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
396 } else if (max_lvl == TF_PT_LVL_2) {
397 page_cnt[TF_PT_LVL_2] = num_data_pages;
398 page_cnt[TF_PT_LVL_1] =
399 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
400 page_cnt[TF_PT_LVL_0] =
401 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
408 * Size the EM table based on capabilities
415 * - EINVAL - Parameter error
416 * - ENOMEM - Out of memory
419 tf_em_size_table(struct hcapi_cfa_em_table *tbl)
421 uint64_t num_data_pages;
424 uint32_t num_entries;
425 uint32_t cnt = TF_EM_MIN_ENTRIES;
427 /* Ignore entry if both size and number are zero */
428 if (!tbl->entry_size && !tbl->num_entries)
431 /* If only one is set then error */
432 if (!tbl->entry_size || !tbl->num_entries)
435 /* Determine number of page table levels and the number
436 * of data pages needed to process the given eem table.
438 if (tbl->type == TF_RECORD_TABLE) {
440 * For action records just a memory size is provided. Work
441 * backwards to resolve to number of entries
443 num_entries = tbl->num_entries / tbl->entry_size;
444 if (num_entries < TF_EM_MIN_ENTRIES) {
445 num_entries = TF_EM_MIN_ENTRIES;
/* Round the derived count up to the next power-of-2 bucket. */
447 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
452 num_entries = tbl->num_entries;
455 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
460 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
462 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
463 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* Record sizing results on the table for later allocation. */
468 tbl->num_lvl = max_lvl + 1;
469 tbl->num_data_pages = num_data_pages;
471 /* Determine the number of pages needed at each level */
472 page_cnt = tbl->page_cnt;
473 memset(page_cnt, 0, sizeof(tbl->page_cnt));
474 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
477 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
479 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
481 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
483 page_cnt[TF_PT_LVL_0],
484 page_cnt[TF_PT_LVL_1],
485 page_cnt[TF_PT_LVL_2]);
491 * Unregisters EM Ctx in Firmware
494 * Pointer to a TruFlow handle
497 * Pointer to a table scope control block
500 * Receive or transmit direction
/* Unregister every sized EM table of the given direction with
 * firmware and free its page-table memory.
 * NOTE(review): the tf_msg_em_mem_unrgtr() return code is ignored;
 * confirm a failed unregister is acceptable during teardown.
 */
503 tf_em_ctx_unreg(struct tf *tfp,
504 struct tf_tbl_scope_cb *tbl_scope_cb,
507 struct hcapi_cfa_em_ctx_mem_info *ctxp =
508 &tbl_scope_cb->em_ctx_info[dir];
509 struct hcapi_cfa_em_table *tbl;
512 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
513 tbl = &ctxp->em_tables[i];
/* Only tables that were actually sized are torn down. */
515 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
516 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
517 tf_em_free_page_table(tbl);
523 * Registers EM Ctx in Firmware
526 * Pointer to a TruFlow handle
529 * Pointer to a table scope control block
532 * Receive or transmit direction
536 * -ENOMEM - Out of Memory
/* For each configured EM table of the direction: size it, allocate
 * and link its page tables, then register the memory with firmware.
 * On any failure all tables registered so far are unwound.
 */
539 tf_em_ctx_reg(struct tf *tfp,
540 struct tf_tbl_scope_cb *tbl_scope_cb,
543 struct hcapi_cfa_em_ctx_mem_info *ctxp =
544 &tbl_scope_cb->em_ctx_info[dir];
545 struct hcapi_cfa_em_table *tbl;
549 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
550 tbl = &ctxp->em_tables[i];
/* Skip tables that were not configured (size/count zero). */
552 if (tbl->num_entries && tbl->entry_size) {
553 rc = tf_em_size_table(tbl);
558 rc = tf_em_alloc_page_table(tbl);
562 tf_em_setup_page_table(tbl);
563 rc = tf_msg_em_mem_rgtr(tfp,
565 TF_EM_PAGE_SIZE_ENUM,
/* Error path: unregister/free everything done so far. */
575 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
580 * Validates EM number of entries requested
583 * Pointer to table scope control block to be populated
586 * Pointer to input parameters
590 * -EINVAL - Parameter error
/* Validate/derive Rx and Tx flow counts from either a memory budget
 * (mem_size_in_mb) or an explicit num_flows_in_k, clamp them to the
 * firmware-reported capability range, require power-of-2 counts, then
 * populate the per-direction EM table sizes (KEY0/KEY1/RECORD/EFC).
 */
593 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
594 struct tf_alloc_tbl_scope_parms *parms)
598 if (parms->rx_mem_size_in_mb != 0) {
/* Key entries are stored twice (KEY0 + KEY1), hence the 2x. */
599 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
600 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
602 uint32_t num_entries = (parms->rx_mem_size_in_mb *
603 TF_MEGABYTE) / (key_b + action_b);
605 if (num_entries < TF_EM_MIN_ENTRIES) {
606 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
608 parms->rx_mem_size_in_mb);
/* Round up to the next supported power-of-2 count. */
612 cnt = TF_EM_MIN_ENTRIES;
613 while (num_entries > cnt &&
614 cnt <= TF_EM_MAX_ENTRIES)
617 if (cnt > TF_EM_MAX_ENTRIES) {
/* NOTE(review): copy/paste defect - this is the Rx branch but
 * the message and value refer to Tx (tx_num_flows_in_k); it
 * should report the Rx request instead.
 */
618 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
620 (parms->tx_num_flows_in_k * TF_KILOBYTE));
624 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Clamp against firmware-reported Rx capability. */
626 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
628 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
629 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
631 "EEM: Invalid number of Rx flows "
632 "requested:%u max:%u\n",
633 parms->rx_num_flows_in_k * TF_KILOBYTE,
634 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
638 /* must be a power-of-2 supported value
639 * in the range 32K - 128M
641 cnt = TF_EM_MIN_ENTRIES;
642 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
643 cnt <= TF_EM_MAX_ENTRIES)
646 if (cnt > TF_EM_MAX_ENTRIES) {
648 "EEM: Invalid number of Rx requested: %u\n",
649 (parms->rx_num_flows_in_k * TF_KILOBYTE));
654 if (parms->tx_mem_size_in_mb != 0) {
655 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
656 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
658 uint32_t num_entries = (parms->tx_mem_size_in_mb *
659 (TF_KILOBYTE * TF_KILOBYTE)) /
662 if (num_entries < TF_EM_MIN_ENTRIES) {
/* NOTE(review): copy/paste defect - this Tx branch logs
 * rx_mem_size_in_mb; it should log tx_mem_size_in_mb.
 */
664 "EEM: Insufficient memory requested:%uMB\n",
665 parms->rx_mem_size_in_mb);
669 cnt = TF_EM_MIN_ENTRIES;
670 while (num_entries > cnt &&
671 cnt <= TF_EM_MAX_ENTRIES)
674 if (cnt > TF_EM_MAX_ENTRIES) {
676 "EEM: Invalid number of Tx requested: %u\n",
677 (parms->tx_num_flows_in_k * TF_KILOBYTE));
681 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Clamp against firmware-reported Tx capability. */
683 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
685 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
686 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
688 "EEM: Invalid number of Tx flows "
689 "requested:%u max:%u\n",
690 (parms->tx_num_flows_in_k * TF_KILOBYTE),
691 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
695 cnt = TF_EM_MIN_ENTRIES;
696 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
697 cnt <= TF_EM_MAX_ENTRIES)
700 if (cnt > TF_EM_MAX_ENTRIES) {
702 "EEM: Invalid number of Tx requested: %u\n",
703 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A direction with flows requires a non-zero key size. */
708 if (parms->rx_num_flows_in_k != 0 &&
709 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
711 "EEM: Rx key size required: %u\n",
712 (parms->rx_max_key_sz_in_bits));
716 if (parms->tx_num_flows_in_k != 0 &&
717 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
719 "EEM: Tx key size required: %u\n",
720 (parms->tx_max_key_sz_in_bits));
/* Populate the per-direction EM table sizing for registration. */
724 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
725 parms->rx_num_flows_in_k * TF_KILOBYTE;
726 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
727 parms->rx_max_key_sz_in_bits / 8;
729 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
730 parms->rx_num_flows_in_k * TF_KILOBYTE;
731 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
732 parms->rx_max_key_sz_in_bits / 8;
734 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
735 parms->rx_num_flows_in_k * TF_KILOBYTE;
736 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
737 parms->rx_max_action_entry_sz_in_bits / 8;
739 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries =
743 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
744 parms->tx_num_flows_in_k * TF_KILOBYTE;
745 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
746 parms->tx_max_key_sz_in_bits / 8;
748 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
749 parms->tx_num_flows_in_k * TF_KILOBYTE;
750 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
751 parms->tx_max_key_sz_in_bits / 8;
753 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
754 parms->tx_num_flows_in_k * TF_KILOBYTE;
755 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
756 parms->tx_max_action_entry_sz_in_bits / 8;
758 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries =
765 * Internal function to set a Table Entry. Supports all internal Table Types
768 * Pointer to TruFlow handle
771 * Pointer to input parameters
775 * -EINVAL - Parameter error
/* Write an internal table entry via firmware after validating the
 * type and confirming the index was allocated from the session pool.
 */
778 tf_set_tbl_entry_internal(struct tf *tfp,
779 struct tf_set_tbl_entry_parms *parms)
784 struct bitalloc *session_pool;
785 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
787 /* Lookup the pool using the table type of the element */
788 rc = tf_rm_lookup_tbl_type_pool(tfs,
792 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
/* Only a subset of internal action-table types may be written. */
798 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
799 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
800 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
802 "%s, Type not supported, type:%d\n",
803 tf_dir_2_str(parms->dir),
808 /* Adjust the returned index/offset as there is no guarantee
809 * that the start is 0 at time of RM allocation
811 tf_rm_convert_index(tfs,
814 TF_RM_CONVERT_RM_BASE,
818 /* Verify that the entry has been previously allocated */
819 id = ba_inuse(session_pool, index);
822 "%s, Invalid or not allocated index, type:%d, idx:%d\n",
823 tf_dir_2_str(parms->dir),
/* Hand the write to firmware. */
830 rc = tf_msg_set_tbl_entry(tfp,
833 parms->data_sz_in_bytes,
838 "%s, Set failed, type:%d, rc:%s\n",
839 tf_dir_2_str(parms->dir),
848 * Internal function to get a Table Entry. Supports all Table Types
849 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
852 * Pointer to TruFlow handle
855 * Pointer to input parameters
859 * -EINVAL - Parameter error
/* Read an internal table entry from firmware after validating that
 * the index was allocated from the session pool.
 */
862 tf_get_tbl_entry_internal(struct tf *tfp,
863 struct tf_get_tbl_entry_parms *parms)
868 struct bitalloc *session_pool;
869 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
871 /* Lookup the pool using the table type of the element */
872 rc = tf_rm_lookup_tbl_type_pool(tfs,
876 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
882 /* Adjust the returned index/offset as there is no guarantee
883 * that the start is 0 at time of RM allocation
885 tf_rm_convert_index(tfs,
888 TF_RM_CONVERT_RM_BASE,
892 /* Verify that the entry has been previously allocated */
893 id = ba_inuse(session_pool, index);
896 "%s, Invalid or not allocated index, type:%d, idx:%d\n",
897 tf_dir_2_str(parms->dir),
/* Fetch the entry contents from firmware. */
904 rc = tf_msg_get_tbl_entry(tfp,
907 parms->data_sz_in_bytes,
912 "%s, Get failed, type:%d, rc:%s\n",
913 tf_dir_2_str(parms->dir),
922 * Internal function to get a Table Entry. Supports all Table Types
923 * except the TF_TBL_TYPE_EXT as that is handled as a table scope.
926 * Pointer to TruFlow handle
929 * Pointer to input parameters
933 * -EINVAL - Parameter error
/* Bulk read of internal table entries beginning at
 * parms->starting_idx.  Note only the starting index is validated
 * against the session pool before the firmware request.
 */
936 tf_bulk_get_tbl_entry_internal(struct tf *tfp,
937 struct tf_bulk_get_tbl_entry_parms *parms)
942 struct bitalloc *session_pool;
943 struct tf_session *tfs = (struct tf_session *)(tfp->session->core_data);
945 /* Lookup the pool using the table type of the element */
946 rc = tf_rm_lookup_tbl_type_pool(tfs,
950 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
954 index = parms->starting_idx;
957 * Adjust the returned index/offset as there is no guarantee
958 * that the start is 0 at time of RM allocation
960 tf_rm_convert_index(tfs,
963 TF_RM_CONVERT_RM_BASE,
967 /* Verify that the entry has been previously allocated */
968 id = ba_inuse(session_pool, index);
971 "%s, Invalid or not allocated index, type:%d, starting_idx:%d\n",
972 tf_dir_2_str(parms->dir),
/* Delegate the bulk transfer to firmware. */
979 rc = tf_msg_bulk_get_tbl_entry(tfp, parms);
982 "%s, Bulk get failed, type:%d, rc:%s\n",
983 tf_dir_2_str(parms->dir),
993 * Allocate Tbl entry from the Shadow DB. Shadow DB is searched for
994 * the requested entry. If found the ref count is incremented and
1000 * Allocation parameters
1003 * 0 - Success, entry found and ref count incremented
1004 * -ENOENT - Failure, entry not found
/* Stub: alloc-with-search against the shadow DB is not implemented;
 * logs that search is not supported.
 */
1007 tf_alloc_tbl_entry_shadow(struct tf_session *tfs __rte_unused,
1008 struct tf_alloc_tbl_entry_parms *parms __rte_unused)
1011 "%s, Entry Alloc with search not supported\n",
1012 tf_dir_2_str(parms->dir));
1018 * Free Tbl entry from the Shadow DB. Shadow DB is searched for
1019 * the requested entry. If found the ref count is decremented and
1020 * new ref_count returned.
1023 * Pointer to session
1025 * Allocation parameters
1028 * 0 - Success, entry found and ref count decremented
1029 * -ENOENT - Failure, entry not found
/* Stub: free-with-search against the shadow DB is not implemented;
 * logs that search is not supported.
 */
1032 tf_free_tbl_entry_shadow(struct tf_session *tfs,
1033 struct tf_free_tbl_entry_parms *parms)
1036 "%s, Entry Free with search not supported\n",
1037 tf_dir_2_str(parms->dir));
1041 #endif /* TF_SHADOW */
1044 * Create External Tbl pool of memory indexes.
1049 * pointer to the table scope
1051 * number of entries to write
1052 * [in] entry_sz_bytes
1053 * size of each entry
1056 * 0 - Success, entry allocated - no search support
1057 * -ENOMEM -EINVAL -EOPNOTSUPP
1058 * - Failure, entry not allocated, out of resources
/* Build a stack of byte offsets into external action memory; offsets
 * are pushed in reverse so the lowest offset pops first.
 */
1061 tf_create_tbl_pool_external(enum tf_dir dir,
1062 struct tf_tbl_scope_cb *tbl_scope_cb,
1063 uint32_t num_entries,
1064 uint32_t entry_sz_bytes)
1066 struct tfp_calloc_parms parms;
1070 struct stack *pool = &tbl_scope_cb->ext_act_pool[dir];
1072 parms.nitems = num_entries;
1073 parms.size = sizeof(uint32_t);
1074 parms.alignment = 0;
1076 if (tfp_calloc(&parms) != 0) {
1077 TFP_DRV_LOG(ERR, "%s: TBL: external pool failure %s\n",
1078 tf_dir_2_str(dir), strerror(ENOMEM));
1082 /* Create empty stack
1084 rc = stack_init(num_entries, parms.mem_va, pool);
1087 TFP_DRV_LOG(ERR, "%s: TBL: stack init failure %s\n",
1088 tf_dir_2_str(dir), strerror(-rc));
1092 /* Save the malloced memory address so that it can
1093 * be freed when the table scope is freed.
1095 tbl_scope_cb->ext_act_pool_mem[dir] = (uint32_t *)parms.mem_va;
1097 /* Fill pool with indexes in reverse
1099 j = (num_entries - 1) * entry_sz_bytes;
1101 for (i = 0; i < num_entries; i++) {
1102 rc = stack_push(pool, j);
1104 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
1105 tf_dir_2_str(dir), strerror(-rc));
/* NOTE(review): "%d" as the first specifier while sibling logs
 * use "%s" with tf_dir_2_str() - confirm the specifier matches
 * the argument actually passed here.
 */
1110 TFP_DRV_LOG(ERR, "%d TBL: invalid offset (%d)\n",
1114 j -= entry_sz_bytes;
/* Sanity: every offset must have been pushed. */
1117 if (!stack_is_full(pool)) {
1119 TFP_DRV_LOG(ERR, "%s TBL: stack failure %s\n",
1120 tf_dir_2_str(dir), strerror(-rc));
/* Error path: release the index array. */
1125 tfp_free((void *)parms.mem_va);
1130 * Destroy External Tbl pool of memory indexes.
1135 * pointer to the table scope
/* Free the external action index array saved at pool creation.
 * NOTE(review): the saved pointer is not cleared after the free -
 * confirm the table scope cannot be destroyed twice.
 */
1139 tf_destroy_tbl_pool_external(enum tf_dir dir,
1140 struct tf_tbl_scope_cb *tbl_scope_cb)
1142 uint32_t *ext_act_pool_mem =
1143 tbl_scope_cb->ext_act_pool_mem[dir];
1145 tfp_free(ext_act_pool_mem);
1149 * Allocate External Tbl entry from the Session Pool.
1152 * Pointer to Truflow Handle
1154 * Allocation parameters
1157 * 0 - Success, entry allocated - no search support
1158 * -ENOMEM -EINVAL -EOPNOTSUPP
1159 * - Failure, entry not allocated, out of resources
/* Pop a free byte offset from the table scope's external action
 * pool for the requested direction.
 */
1162 tf_alloc_tbl_entry_pool_external(struct tf *tfp,
1163 struct tf_alloc_tbl_entry_parms *parms)
1167 struct tf_session *tfs;
1168 struct tf_tbl_scope_cb *tbl_scope_cb;
1171 TF_CHECK_PARMS_SESSION(tfp, parms);
1173 tfs = (struct tf_session *)(tfp->session->core_data);
1175 /* Get the pool info from the table scope
1177 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1179 if (tbl_scope_cb == NULL) {
1181 "%s, table scope not allocated\n",
1182 tf_dir_2_str(parms->dir));
1185 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
1187 /* Allocate an element
1189 rc = stack_pop(pool, &index);
1193 "%s, Allocation failed, type:%d\n",
1194 tf_dir_2_str(parms->dir),
1203 * Allocate Internal Tbl entry from the Session Pool.
1206 * Pointer to Truflow Handle
1208 * Allocation parameters
1211 * 0 - Success, entry found and ref count decremented
1212 * -ENOMEM - Failure, entry not allocated, out of resources
/* Allocate an index from the session's bitalloc pool for the given
 * internal table type, then rebase it to the device index space.
 */
1215 tf_alloc_tbl_entry_pool_internal(struct tf *tfp,
1216 struct tf_alloc_tbl_entry_parms *parms)
1222 struct bitalloc *session_pool;
1223 struct tf_session *tfs;
1225 TF_CHECK_PARMS_SESSION(tfp, parms);
1227 tfs = (struct tf_session *)(tfp->session->core_data);
/* Restrict to the supported internal action table types. */
1229 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1230 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1231 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1232 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1233 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1234 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1235 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1237 "%s, Type not supported, type:%d\n",
1238 tf_dir_2_str(parms->dir),
1243 /* Lookup the pool using the table type of the element */
1244 rc = tf_rm_lookup_tbl_type_pool(tfs,
1248 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1252 id = ba_alloc(session_pool);
/* On failure report how many entries remain free. */
1254 free_cnt = ba_free_count(session_pool);
1257 "%s, Allocation failed, type:%d, free:%d\n",
1258 tf_dir_2_str(parms->dir),
1264 /* Adjust the returned index/offset as there is no guarantee
1265 * that the start is 0 at time of RM allocation
1267 tf_rm_convert_index(tfs,
1270 TF_RM_CONVERT_ADD_BASE,
1278 * Free External Tbl entry to the session pool.
1281 * Pointer to Truflow Handle
1283 * Allocation parameters
1286 * 0 - Success, entry freed
1288 * - Failure, entry not successfully freed for these reasons
/* Push the entry's byte offset back onto the table scope's external
 * action pool.
 */
1294 tf_free_tbl_entry_pool_external(struct tf *tfp,
1295 struct tf_free_tbl_entry_parms *parms)
1298 struct tf_session *tfs;
1300 struct tf_tbl_scope_cb *tbl_scope_cb;
1303 TF_CHECK_PARMS_SESSION(tfp, parms);
1305 tfs = (struct tf_session *)(tfp->session->core_data);
1307 /* Get the pool info from the table scope
1309 tbl_scope_cb = tbl_scope_cb_find(tfs, parms->tbl_scope_id);
1311 if (tbl_scope_cb == NULL) {
1313 "%s, table scope error\n",
1314 tf_dir_2_str(parms->dir));
1317 pool = &tbl_scope_cb->ext_act_pool[parms->dir];
/* A full stack means this index was never popped - double free. */
1321 rc = stack_push(pool, index);
1325 "%s, consistency error, stack full, type:%d, idx:%d\n",
1326 tf_dir_2_str(parms->dir),
1334 * Free Internal Tbl entry from the Session Pool.
1337 * Pointer to Truflow Handle
1339 * Allocation parameters
1342 * 0 - Success, entry found and ref count decremented
1343 * -ENOMEM - Failure, entry not allocated, out of resources
/* Return an internal table index to the session bitalloc pool after
 * verifying it was actually allocated.
 */
1346 tf_free_tbl_entry_pool_internal(struct tf *tfp,
1347 struct tf_free_tbl_entry_parms *parms)
1351 struct bitalloc *session_pool;
1352 struct tf_session *tfs;
1355 TF_CHECK_PARMS_SESSION(tfp, parms);
1357 tfs = (struct tf_session *)(tfp->session->core_data);
/* Same internal action-table type set as the alloc path. */
1359 if (parms->type != TF_TBL_TYPE_FULL_ACT_RECORD &&
1360 parms->type != TF_TBL_TYPE_ACT_SP_SMAC &&
1361 parms->type != TF_TBL_TYPE_ACT_SP_SMAC_IPV4 &&
1362 parms->type != TF_TBL_TYPE_ACT_ENCAP_8B &&
1363 parms->type != TF_TBL_TYPE_ACT_ENCAP_16B &&
1364 parms->type != TF_TBL_TYPE_ACT_ENCAP_64B &&
1365 parms->type != TF_TBL_TYPE_ACT_STATS_64) {
1367 "%s, Type not supported, type:%d\n",
1368 tf_dir_2_str(parms->dir),
1373 /* Lookup the pool using the table type of the element */
1374 rc = tf_rm_lookup_tbl_type_pool(tfs,
1378 /* Error logging handled by tf_rm_lookup_tbl_type_pool */
1384 /* Adjust the returned index/offset as there is no guarantee
1385 * that the start is 0 at time of RM allocation
1387 tf_rm_convert_index(tfs,
1390 TF_RM_CONVERT_RM_BASE,
1394 /* Check if element was indeed allocated */
1395 id = ba_inuse_free(session_pool, index);
1398 "%s, Element not previously alloc'ed, type:%d, idx:%d\n",
1399 tf_dir_2_str(parms->dir),
1408 /* API defined in tf_em.h */
/* Look up a table scope control block by id: the id is first checked
 * against the rx scope pool, then matched against the session's
 * scope array.  NOTE(review): presumably returns NULL when no scope
 * matches - confirm against the full function body.
 */
1409 struct tf_tbl_scope_cb *
1410 tbl_scope_cb_find(struct tf_session *session,
1411 uint32_t tbl_scope_id)
1415 /* Check that id is valid */
1416 i = ba_inuse(session->tbl_scope_pool_rx, tbl_scope_id);
1420 for (i = 0; i < TF_NUM_TBL_SCOPE; i++) {
1421 if (session->tbl_scopes[i].tbl_scope_id == tbl_scope_id)
1422 return &session->tbl_scopes[i];
1428 /* API defined in tf_core.h */
/* Tear down an EEM table scope: release its control block index,
 * then per direction destroy the external pools, disable EEM in
 * firmware and unregister all EM context memory.
 */
1430 tf_free_eem_tbl_scope_cb(struct tf *tfp,
1431 struct tf_free_tbl_scope_parms *parms)
1435 struct tf_tbl_scope_cb *tbl_scope_cb;
1436 struct tf_session *session;
1438 session = (struct tf_session *)(tfp->session->core_data);
1440 tbl_scope_cb = tbl_scope_cb_find(session,
1441 parms->tbl_scope_id);
1443 if (tbl_scope_cb == NULL) {
1444 TFP_DRV_LOG(ERR, "Table scope error\n");
1448 /* Free Table control block */
1449 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1451 /* free table scope locks */
1452 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1453 /* Free associated external pools
1455 tf_destroy_tbl_pool_external(dir,
1459 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1461 /* free table scope and all associated resources */
1462 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
1468 /* API defined in tf_em.h */
/* Allocate and configure an EEM table scope: reserve a control
 * block, query per-direction capabilities, validate sizing, register
 * EM context memory, configure and enable EEM in firmware, and
 * create the external action pools.  Failures unwind either via
 * tf_free_eem_tbl_scope_cb() or by freeing just the control block.
 */
1470 tf_alloc_eem_tbl_scope(struct tf *tfp,
1471 struct tf_alloc_tbl_scope_parms *parms)
1475 struct tf_tbl_scope_cb *tbl_scope_cb;
1476 struct hcapi_cfa_em_table *em_tables;
1478 struct tf_session *session;
1479 struct tf_free_tbl_scope_parms free_parms;
1481 session = (struct tf_session *)tfp->session->core_data;
1483 /* Get Table Scope control block from the session pool */
1484 index = ba_alloc(session->tbl_scope_pool_rx);
1486 TFP_DRV_LOG(ERR, "EEM: Unable to allocate table scope "
1491 tbl_scope_cb = &session->tbl_scopes[index];
1492 tbl_scope_cb->index = index;
1493 tbl_scope_cb->tbl_scope_id = index;
1494 parms->tbl_scope_id = index;
/* Query EEM capabilities for both directions up front. */
1496 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1497 rc = tf_msg_em_qcaps(tfp,
1499 &tbl_scope_cb->em_caps[dir]);
1502 "EEM: Unable to query for EEM capability,"
1510 * Validate and setup table sizes
1512 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1515 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1517 * Allocate tables and signal configuration to FW
1519 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1522 "EEM: Unable to register for EEM ctx,"
1528 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
1529 rc = tf_msg_em_cfg(tfp,
1530 em_tables[TF_KEY0_TABLE].num_entries,
1531 em_tables[TF_KEY0_TABLE].ctx_id,
1532 em_tables[TF_KEY1_TABLE].ctx_id,
1533 em_tables[TF_RECORD_TABLE].ctx_id,
1534 em_tables[TF_EFC_TABLE].ctx_id,
1535 parms->hw_flow_cache_flush_timer,
1539 "TBL: Unable to configure EEM in firmware"
1545 rc = tf_msg_em_op(tfp,
1547 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1551 "EEM: Unable to enable EEM in firmware"
1557 /* Allocate the pool of offsets of the external memory.
1558 * Initially, this is a single fixed size pool for all external
1559 * actions related to a single table scope.
1561 rc = tf_create_tbl_pool_external(dir,
1563 em_tables[TF_RECORD_TABLE].num_entries,
1564 em_tables[TF_RECORD_TABLE].entry_size);
1567 "%s TBL: Unable to allocate idx pools %s\n",
/* Cleanup: full unwind through the scope free routine. */
1577 free_parms.tbl_scope_id = index;
1578 tf_free_eem_tbl_scope_cb(tfp, &free_parms);
1582 /* Free Table control block */
1583 ba_free(session->tbl_scope_pool_rx, tbl_scope_cb->index);
1587 /* API defined in tf_core.h */
/* Set a table entry.  External (TF_TBL_TYPE_EXT) entries are written
 * directly into host EEM memory via a page lookup; everything else
 * is delegated to the internal set path.
 */
1589 tf_set_tbl_entry(struct tf *tfp,
1590 struct tf_set_tbl_entry_parms *parms)
1593 struct tf_tbl_scope_cb *tbl_scope_cb;
1594 struct tf_session *session;
1596 TF_CHECK_PARMS_SESSION(tfp, parms);
1598 if (parms->data == NULL) {
1600 "%s, invalid parms->data\n",
1601 tf_dir_2_str(parms->dir));
1605 if (parms->type == TF_TBL_TYPE_EXT) {
1607 uint32_t offset = parms->idx;
1608 uint32_t tbl_scope_id;
1610 session = (struct tf_session *)(tfp->session->core_data);
1612 tbl_scope_id = parms->tbl_scope_id;
1614 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1616 "%s, Table scope not allocated\n",
1617 tf_dir_2_str(parms->dir));
1621 /* Get the table scope control block associated with the
1624 tbl_scope_cb = tbl_scope_cb_find(session, tbl_scope_id);
1626 if (tbl_scope_cb == NULL) {
1628 "%s, table scope error\n",
1629 tf_dir_2_str(parms->dir));
1633 /* External table, implicitly the Action table */
1634 base_addr = (void *)(uintptr_t)
1635 hcapi_get_table_page(&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE], offset);
1637 if (base_addr == NULL) {
1639 "%s, Base address lookup failed\n",
1640 tf_dir_2_str(parms->dir));
/* Copy into the page at the offset's position within it. */
1644 offset %= TF_EM_PAGE_SIZE;
1645 rte_memcpy((char *)base_addr + offset,
1647 parms->data_sz_in_bytes);
1649 /* Internal table type processing */
1650 rc = tf_set_tbl_entry_internal(tfp, parms);
1653 "%s, Set failed, type:%d, rc:%s\n",
1654 tf_dir_2_str(parms->dir),
1663 /* API defined in tf_core.h */
/* Get a table entry.  External tables are not yet supported;
 * internal types are delegated to the internal get path.
 */
1665 tf_get_tbl_entry(struct tf *tfp,
1666 struct tf_get_tbl_entry_parms *parms)
1670 TF_CHECK_PARMS_SESSION(tfp, parms);
1672 if (parms->type == TF_TBL_TYPE_EXT) {
1673 /* Not supported, yet */
1675 "%s, External table type not supported\n",
1676 tf_dir_2_str(parms->dir));
1680 /* Internal table type processing */
1681 rc = tf_get_tbl_entry_internal(tfp, parms);
1684 "%s, Get failed, type:%d, rc:%s\n",
1685 tf_dir_2_str(parms->dir),
1693 /* API defined in tf_core.h */
/* Bulk get of table entries.  External tables are not yet supported;
 * internal types are delegated to the internal bulk-get path.
 */
1695 tf_bulk_get_tbl_entry(struct tf *tfp,
1696 struct tf_bulk_get_tbl_entry_parms *parms)
1700 TF_CHECK_PARMS_SESSION(tfp, parms);
1702 if (parms->type == TF_TBL_TYPE_EXT) {
1703 /* Not supported, yet */
1705 "%s, External table type not supported\n",
1706 tf_dir_2_str(parms->dir));
1710 /* Internal table type processing */
1711 rc = tf_bulk_get_tbl_entry_internal(tfp, parms);
1714 "%s, Bulk get failed, type:%d, rc:%s\n",
1715 tf_dir_2_str(parms->dir),
1723 /* API defined in tf_core.h */
/* Allocate a table scope; currently always EEM-backed. */
1725 tf_alloc_tbl_scope(struct tf *tfp,
1726 struct tf_alloc_tbl_scope_parms *parms)
1730 TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
1732 rc = tf_alloc_eem_tbl_scope(tfp, parms);
1737 /* API defined in tf_core.h */
/* Free a table scope and all associated EEM resources. */
1739 tf_free_tbl_scope(struct tf *tfp,
1740 struct tf_free_tbl_scope_parms *parms)
1744 TF_CHECK_PARMS_SESSION_NO_DIR(tfp, parms);
1746 /* free table scope and all associated resources */
1747 rc = tf_free_eem_tbl_scope_cb(tfp, parms);
1752 /* API defined in tf_core.h */
/* Allocate a table entry: external types come from the table scope's
 * pool, internal types from the session pool, with an optional
 * shadow-DB search when TF_SHADOW is compiled in.
 */
1754 tf_alloc_tbl_entry(struct tf *tfp,
1755 struct tf_alloc_tbl_entry_parms *parms)
1758 #if (TF_SHADOW == 1)
1759 struct tf_session *tfs;
1760 #endif /* TF_SHADOW */
1762 TF_CHECK_PARMS_SESSION(tfp, parms);
1764 * No shadow copy support for external tables, allocate and return
1766 if (parms->type == TF_TBL_TYPE_EXT) {
1767 rc = tf_alloc_tbl_entry_pool_external(tfp, parms);
1771 #if (TF_SHADOW == 1)
1772 tfs = (struct tf_session *)(tfp->session->core_data);
1774 /* Search the Shadow DB for requested element. If not found go
1775 * allocate one from the Session Pool
1777 if (parms->search_enable && tfs->shadow_copy) {
1778 rc = tf_alloc_tbl_entry_shadow(tfs, parms);
1779 /* Entry found and parms populated with return data */
1783 #endif /* TF_SHADOW */
1785 rc = tf_alloc_tbl_entry_pool_internal(tfp, parms);
1787 TFP_DRV_LOG(ERR, "%s, Alloc failed, rc:%s\n",
1788 tf_dir_2_str(parms->dir),
1794 /* API defined in tf_core.h */
/* Free a table entry: external types back to the table scope's pool,
 * internal types back to the session pool, with an optional
 * shadow-DB search when TF_SHADOW is compiled in.
 */
1796 tf_free_tbl_entry(struct tf *tfp,
1797 struct tf_free_tbl_entry_parms *parms)
1800 #if (TF_SHADOW == 1)
1801 struct tf_session *tfs;
1802 #endif /* TF_SHADOW */
1804 TF_CHECK_PARMS_SESSION(tfp, parms);
1807 * No shadow of external tables so just free the entry
1809 if (parms->type == TF_TBL_TYPE_EXT) {
1810 rc = tf_free_tbl_entry_pool_external(tfp, parms);
1814 #if (TF_SHADOW == 1)
1815 tfs = (struct tf_session *)(tfp->session->core_data);
1817 /* Search the Shadow DB for requested element. If not found go
1818 * allocate one from the Session Pool
1820 if (parms->search_enable && tfs->shadow_copy) {
1821 rc = tf_free_tbl_entry_shadow(tfs, parms);
1822 /* Entry free'ed and parms populated with return data */
1826 #endif /* TF_SHADOW */
1828 rc = tf_free_tbl_entry_pool_internal(tfp, parms);
/* NOTE(review): copy/paste defect - this is the free path but the
 * message reads "Alloc failed"; it should read "Free failed".
 */
1831 TFP_DRV_LOG(ERR, "%s, Alloc failed, rc:%s\n",
1832 tf_dir_2_str(parms->dir),
/* Debug helper: dump the PTEs of page-table level tp that reference
 * pages of tp_next, decoding the VALID/LAST flag bits.
 */
1839 tf_dump_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
1840 struct hcapi_cfa_em_page_tbl *tp_next)
1847 printf("pg_count:%d pg_size:0x%x\n",
1850 for (i = 0; i < tp->pg_count; i++) {
1851 pg_va = tp->pg_va_tbl[i];
1852 printf("\t%p\n", (void *)pg_va);
1853 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
1854 printf("\t\t%p\n", (void *)(uintptr_t)pg_va[j]);
/* The low 3 bits of each PTE carry the PTU flag bits. */
1855 if (((pg_va[j] & 0x7) ==
1856 tfp_cpu_to_le_64(PTU_PTE_LAST |
1860 if (!(pg_va[j] & tfp_cpu_to_le_64(PTU_PTE_VALID))) {
1861 printf("** Invalid entry **\n");
1865 if (++k >= tp_next->pg_count) {
1866 printf("** Shouldn't get here **\n");
1873 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id);
1875 void tf_dump_dma(struct tf *tfp, uint32_t tbl_scope_id)
1877 struct tf_session *session;
1878 struct tf_tbl_scope_cb *tbl_scope_cb;
1879 struct hcapi_cfa_em_page_tbl *tp;
1880 struct hcapi_cfa_em_page_tbl *tp_next;
1881 struct hcapi_cfa_em_table *tbl;
1886 printf("called %s\n", __func__);
1888 /* find session struct */
1889 session = (struct tf_session *)tfp->session->core_data;
1891 /* find control block for table scope */
1892 tbl_scope_cb = tbl_scope_cb_find(session,
1894 if (tbl_scope_cb == NULL)
1895 PMD_DRV_LOG(ERR, "No table scope\n");
1897 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1898 printf("Direction %s:\n", (dir == TF_DIR_RX ? "Rx" : "Tx"));
1900 for (j = TF_KEY0_TABLE; j < TF_MAX_TABLE; j++) {
1901 tbl = &tbl_scope_cb->em_ctx_info[dir].em_tables[j];
1903 ("Table: j:%d type:%d num_entries:%d entry_size:0x%x num_lvl:%d ",
1909 if (tbl->pg_tbl[0].pg_va_tbl &&
1910 tbl->pg_tbl[0].pg_pa_tbl)
1912 tbl->pg_tbl[0].pg_va_tbl[0],
1913 (void *)(uintptr_t)tbl->pg_tbl[0].pg_pa_tbl[0]);
1914 for (i = 0; i < tbl->num_lvl - 1; i++) {
1915 printf("Level:%d\n", i);
1916 tp = &tbl->pg_tbl[i];
1917 tp_next = &tbl->pg_tbl[i + 1];
1918 tf_dump_link_page_table(tp, tp_next);