1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
21 #include "tf_ext_flow_handle.h"
26 #define PTU_PTE_VALID 0x1UL
27 #define PTU_PTE_LAST 0x2UL
28 #define PTU_PTE_NEXT_TO_LAST 0x4UL
30 /* Number of pointers per page_size */
31 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
33 #define TF_EM_PG_SZ_4K (1 << 12)
34 #define TF_EM_PG_SZ_8K (1 << 13)
35 #define TF_EM_PG_SZ_64K (1 << 16)
36 #define TF_EM_PG_SZ_256K (1 << 18)
37 #define TF_EM_PG_SZ_1M (1 << 20)
38 #define TF_EM_PG_SZ_2M (1 << 21)
39 #define TF_EM_PG_SZ_4M (1 << 22)
40 #define TF_EM_PG_SZ_1G (1 << 30)
42 #define TF_EM_CTX_ID_INVALID 0xFFFF
44 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
45 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
50 extern void *eem_db[TF_DIR_MAX];
53 * Function to free a page table
56 * Pointer to the page table to free
/*
 * Frees one level of an EM page table: each mapped data page is released,
 * then (per the visible tail) the VA and PA pointer arrays themselves.
 * NOTE(review): this view is missing interior lines (local declarations,
 * the TFP_DRV_LOG() call head, closing braces) - annotated only, code
 * left byte-identical.
 */
59 tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
63 for (i = 0; i < tp->pg_count; i++) {
/* A NULL VA entry means this page was never mapped; it is logged, not freed. */
64 if (!tp->pg_va_tbl[i]) {
66 "No mapping for page: %d table: %016" PRIu64 "\n",
68 (uint64_t)(uintptr_t)tp);
/* Release the data page and clear the stale pointer. */
72 tfp_free(tp->pg_va_tbl[i]);
73 tp->pg_va_tbl[i] = NULL;
/* Finally release the pointer arrays that tracked the pages (VA, then PA). */
77 tfp_free(tp->pg_va_tbl);
79 tfp_free(tp->pg_pa_tbl);
84 * Function to free an EM table
87 * Pointer to the EM table to free
/*
 * Frees an entire EM table: iterates every page-table level, logs what is
 * being freed, and delegates the per-level teardown to tf_em_free_pg_tbl().
 * Afterwards the table's L0 DMA address and data-page count are reset so
 * the table reads as empty.
 */
90 tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
92 struct hcapi_cfa_em_page_tbl *tp;
95 for (i = 0; i < tbl->num_lvl; i++) {
98 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
/* Free all pages belonging to this level. */
103 tf_em_free_pg_tbl(tp);
/* Reset bookkeeping so the table is marked fully torn down. */
107 tbl->l0_dma_addr = 0;
109 tbl->num_data_pages = 0;
113 * Allocation of page tables
116 * Pointer to a TruFlow handle
119 * Page count to allocate
126 * -ENOMEM - Out of memory
/*
 * Allocates one level of an EM page table: first the VA and PA pointer
 * arrays (pg_count entries of sizeof(void *)), then pg_count data pages of
 * pg_size bytes each, aligned to TF_EM_PAGE_ALIGNMENT and zeroed.
 * On any allocation failure the partially built level is released
 * (tf_em_free_pg_tbl() in the visible error tail) and -ENOMEM is the
 * documented result.
 */
129 tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
134 struct tfp_calloc_parms parms;
/* Pointer arrays: pg_count slots of sizeof(void *). */
136 parms.nitems = pg_count;
137 parms.size = sizeof(void *);
140 if (tfp_calloc(&parms) != 0)
143 tp->pg_va_tbl = parms.mem_va;
/* Second array allocation (PA table); roll back the VA table on failure. */
145 if (tfp_calloc(&parms) != 0) {
146 tfp_free(tp->pg_va_tbl);
150 tp->pg_pa_tbl = parms.mem_va;
153 tp->pg_size = pg_size;
/* Allocate, record and zero each backing data page. */
155 for (i = 0; i < pg_count; i++) {
157 parms.size = pg_size;
158 parms.alignment = TF_EM_PAGE_ALIGNMENT;
160 if (tfp_calloc(&parms) != 0)
163 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
164 tp->pg_va_tbl[i] = parms.mem_va;
166 memset(tp->pg_va_tbl[i], 0, pg_size);
/* Error path: free everything allocated for this level so far. */
173 tf_em_free_pg_tbl(tp);
178 * Allocates EM page tables
181 * Table to allocate pages for
185 * -ENOMEM - Out of memory
/*
 * Allocates every level of an EM table by calling tf_em_alloc_pg_tbl() per
 * level, logging each allocated page (VA and PA) for debugging.  On a
 * per-level failure the visible error tail unwinds the whole table via
 * tf_em_free_page_table().
 */
188 tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
190 struct hcapi_cfa_em_page_tbl *tp;
195 for (i = 0; i < tbl->num_lvl; i++) {
196 tp = &tbl->pg_tbl[i];
/* Allocate this level; page counts per level come from tbl (args truncated in this view). */
198 rc = tf_em_alloc_pg_tbl(tp,
203 "Failed to allocate page table: lvl: %d, rc:%s\n",
/* Debug dump of every page just allocated at this level. */
209 for (j = 0; j < tp->pg_count; j++) {
211 "EEM: Allocated page table: size %u lvl %d cnt"
216 (void *)(uintptr_t)tp->pg_va_tbl[j],
217 (void *)(uintptr_t)tp->pg_pa_tbl[j]);
/* Error path: release all levels allocated so far. */
223 tf_em_free_page_table(tbl);
228 * Links EM page tables
231 * Pointer to page table
234 * Pointer to the next page table
237 * Flag controlling if the page table is last
/*
 * Fills level tp's pages with little-endian PTEs pointing at the pages of
 * tp_next.  When set_pte_last is set (linking to the final level), the
 * second-to-last and last entries additionally carry the
 * PTU_PTE_NEXT_TO_LAST / PTU_PTE_LAST markers; all entries carry
 * PTU_PTE_VALID.  k is the running index into tp_next's page list and the
 * inner loop stops once every next-level page has been referenced.
 */
240 tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
241 struct hcapi_cfa_em_page_tbl *tp_next,
244 uint64_t *pg_pa = tp_next->pg_pa_tbl;
251 for (i = 0; i < tp->pg_count; i++) {
252 pg_va = tp->pg_va_tbl[i];
/* Write one PTE per pointer slot in this page. */
254 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
255 if (k == tp_next->pg_count - 2 && set_pte_last)
256 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
257 else if (k == tp_next->pg_count - 1 && set_pte_last)
258 valid = PTU_PTE_LAST | PTU_PTE_VALID;
260 valid = PTU_PTE_VALID;
/* PTE = physical address of the next-level page OR'd with validity flags. */
262 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
263 if (++k >= tp_next->pg_count)
270 * Setup a EM page table
273 * Pointer to EM page table
/*
 * Links all levels of an already-allocated EM table top-down: each level i
 * is pointed at level i+1 via tf_em_link_page_table(), with the LAST/NTL
 * PTE markers enabled only when linking into the final (data) level.
 * Finally records the level-0 root page (VA and DMA address) in the table
 * for handing to hardware/firmware.
 */
276 tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
278 struct hcapi_cfa_em_page_tbl *tp_next;
279 struct hcapi_cfa_em_page_tbl *tp;
280 bool set_pte_last = 0;
283 for (i = 0; i < tbl->num_lvl - 1; i++) {
284 tp = &tbl->pg_tbl[i];
285 tp_next = &tbl->pg_tbl[i + 1];
/* Only the link into the last level gets the LAST/NEXT_TO_LAST markers. */
286 if (i == tbl->num_lvl - 2)
288 tf_em_link_page_table(tp, tp_next, set_pte_last);
/* Publish the root of the page-table tree. */
291 tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
292 tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
296 * Given the page size, size of each data item (entry size),
297 * and the total number of entries needed, determine the number
298 * of page table levels and the number of data pages required.
307 * Number of entries needed
309 * [out] num_data_pages
310 * Number of pages required
313 * Success - Number of EM page levels required
314 * -ENOMEM - Out of memory
/*
 * Starting from one page's worth of data (level 0), grows the reachable
 * data size by one indirection level at a time - level 1 covers
 * MAX_PAGE_PTRS(page_size) pages, level 2 covers MAX_PAGE_PTRS^2 pages -
 * until it can hold num_entries * entry_size bytes.  Outputs the number of
 * data pages (rounded up) and returns the level index reached.
 */
317 tf_em_size_page_tbl_lvl(uint32_t page_size,
319 uint32_t num_entries,
320 uint64_t *num_data_pages)
322 uint64_t lvl_data_size = page_size;
323 int lvl = TF_PT_LVL_0;
/* Total bytes of data the table has to hold. */
327 data_size = (uint64_t)num_entries * entry_size;
/* Add indirection levels until the tree can address all the data. */
329 while (lvl_data_size < data_size) {
332 if (lvl == TF_PT_LVL_1)
333 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
335 else if (lvl == TF_PT_LVL_2)
336 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
337 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages needed, rounded up to a whole page. */
342 *num_data_pages = roundup(data_size, page_size) / page_size;
348 * Return the number of page table pages needed to
349 * reference the given number of next level pages.
355 * Size of each EM page
358 * Number of EM page table pages
/*
 * Ceiling division: how many page-table pages are required so that their
 * combined pointer slots (MAX_PAGE_PTRS per page) cover num_pages
 * next-level pages.
 */
361 tf_em_page_tbl_pgcnt(uint32_t num_pages,
364 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
365 MAX_PAGE_PTRS(page_size);
370 * Given the number of data pages, page_size and the maximum
371 * number of page table levels (already determined), size
372 * the number of page table pages required at each level.
375 * Max number of levels
377 * [in] num_data_pages
378 * Number of EM data pages
/*
 * Fills page_cnt[] bottom-up: the deepest level holds the data pages
 * themselves, and each level above it holds just enough page-table pages
 * to reference the level below (tf_em_page_tbl_pgcnt).  Only up to three
 * levels (L0..L2) are supported.
 */
387 tf_em_size_page_tbls(int max_lvl,
388 uint64_t num_data_pages,
392 if (max_lvl == TF_PT_LVL_0) {
/* Single-level table: L0 is the data itself. */
393 page_cnt[TF_PT_LVL_0] = num_data_pages;
394 } else if (max_lvl == TF_PT_LVL_1) {
395 page_cnt[TF_PT_LVL_1] = num_data_pages;
396 page_cnt[TF_PT_LVL_0] =
397 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
398 } else if (max_lvl == TF_PT_LVL_2) {
399 page_cnt[TF_PT_LVL_2] = num_data_pages;
400 page_cnt[TF_PT_LVL_1] =
401 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
402 page_cnt[TF_PT_LVL_0] =
403 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
410 * Size the EM table based on capabilities
417 * - EINVAL - Parameter error
418 * - ENOMEM - Out of memory
/*
 * Computes the page-table geometry for one EM table: resolves the entry
 * count (for TF_RECORD_TABLE the caller supplies a memory size, which is
 * rounded up to the next supported power-of-two count, minimum
 * TF_EM_MIN_ENTRIES), sizes the number of levels and data pages via
 * tf_em_size_page_tbl_lvl(), then fills tbl->page_cnt[] per level via
 * tf_em_size_page_tbls().  A table with both entry_size and num_entries
 * zero is ignored; exactly one of them being zero is a parameter error.
 */
421 tf_em_size_table(struct hcapi_cfa_em_table *tbl)
423 uint64_t num_data_pages;
426 uint32_t num_entries;
427 uint32_t cnt = TF_EM_MIN_ENTRIES;
429 /* Ignore entry if both size and number are zero */
430 if (!tbl->entry_size && !tbl->num_entries)
433 /* If only one is set then error */
434 if (!tbl->entry_size || !tbl->num_entries)
437 /* Determine number of page table levels and the number
438 * of data pages needed to process the given eem table.
440 if (tbl->type == TF_RECORD_TABLE) {
442 * For action records just a memory size is provided. Work
443 * backwards to resolve to number of entries
445 num_entries = tbl->num_entries / tbl->entry_size;
/* Clamp to the minimum, then round up to the next supported power of two. */
446 if (num_entries < TF_EM_MIN_ENTRIES) {
447 num_entries = TF_EM_MIN_ENTRIES;
449 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
454 num_entries = tbl->num_entries;
457 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
462 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
464 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
465 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* Levels are 0-based from the sizing helper; store the count. */
470 tbl->num_lvl = max_lvl + 1;
471 tbl->num_data_pages = num_data_pages;
473 /* Determine the number of pages needed at each level */
474 page_cnt = tbl->page_cnt;
475 memset(page_cnt, 0, sizeof(tbl->page_cnt));
476 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
479 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
481 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
483 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
485 page_cnt[TF_PT_LVL_0],
486 page_cnt[TF_PT_LVL_1],
487 page_cnt[TF_PT_LVL_2]);
493 * Unregisters EM Ctx in Firmware
496 * Pointer to a TruFlow handle
499 * Pointer to a table scope control block
502 * Receive or transmit direction
/*
 * Tears down one direction's EM context: for every configured table
 * (non-zero entry count AND entry size) it unregisters the context memory
 * with firmware and frees the host page tables.  Return value of
 * tf_msg_em_mem_unrgtr() is intentionally ignored here - teardown is
 * best-effort.
 */
505 tf_em_ctx_unreg(struct tf *tfp,
506 struct tf_tbl_scope_cb *tbl_scope_cb,
509 struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
510 struct hcapi_cfa_em_table *tbl;
513 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
514 tbl = &ctxp->em_tables[i];
/* Only tables that were actually sized/registered get unwound. */
516 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
517 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
518 tf_em_free_page_table(tbl);
524 * Registers EM Ctx in Firmware
527 * Pointer to a TruFlow handle
530 * Pointer to a table scope control block
533 * Receive or transmit direction
537 * -ENOMEM - Out of Memory
/*
 * Builds and registers one direction's EM context: for each configured
 * table it sizes the page-table geometry, allocates and links the page
 * tables, then registers the memory with firmware
 * (tf_msg_em_mem_rgtr with TF_EM_PAGE_SIZE_ENUM).  The visible error tail
 * unwinds everything already registered via tf_em_ctx_unreg().
 */
540 tf_em_ctx_reg(struct tf *tfp,
541 struct tf_tbl_scope_cb *tbl_scope_cb,
544 struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
545 struct hcapi_cfa_em_table *tbl;
549 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
550 tbl = &ctxp->em_tables[i];
/* Skip tables that were not requested (zero entries or entry size). */
552 if (tbl->num_entries && tbl->entry_size) {
553 rc = tf_em_size_table(tbl);
558 rc = tf_em_alloc_page_table(tbl);
/* Link the levels, then hand the memory to firmware. */
562 tf_em_setup_page_table(tbl);
563 rc = tf_msg_em_mem_rgtr(tfp,
565 TF_EM_PAGE_SIZE_ENUM,
/* Error path: undo all registrations done so far for this direction. */
575 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
581 * Validates EM number of entries requested
584 * Pointer to table scope control block to be populated
587 * Pointer to input parameters
591 * -EINVAL - Parameter error
/*
 * Validates/derives the Rx and Tx flow counts for a table scope.  When a
 * memory size (MB) is supplied, the entry count is derived from the
 * per-flow footprint (2 key copies + action record), clamped to the
 * TF_EM_MIN_ENTRIES..TF_EM_MAX_ENTRIES power-of-two range, and checked
 * against firmware capabilities.  Finally the KEY0/KEY1/RECORD/EFC table
 * num_entries/entry_size fields are populated for both directions.
 */
594 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
595 struct tf_alloc_tbl_scope_parms *parms)
599 if (parms->rx_mem_size_in_mb != 0) {
/* Per-flow bytes: two key records plus one action record (bits -> bytes). */
600 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
601 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
603 uint32_t num_entries = (parms->rx_mem_size_in_mb *
604 TF_MEGABYTE) / (key_b + action_b);
606 if (num_entries < TF_EM_MIN_ENTRIES) {
607 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
609 parms->rx_mem_size_in_mb);
/* Round the derived count up to the next supported power of two. */
613 cnt = TF_EM_MIN_ENTRIES;
614 while (num_entries > cnt &&
615 cnt <= TF_EM_MAX_ENTRIES)
618 if (cnt > TF_EM_MAX_ENTRIES) {
/*
 * NOTE(review): this is the Rx sizing branch, but the message says "Tx"
 * and prints tx_num_flows_in_k - looks like a copy/paste defect; it
 * should report the Rx request.  Runtime strings left untouched in this
 * doc-only pass; confirm against upstream before changing.
 */
619 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
621 (parms->tx_num_flows_in_k * TF_KILOBYTE));
625 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Range-check the requested Rx flow count against firmware capabilities. */
627 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
629 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
630 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
632 "EEM: Invalid number of Rx flows "
633 "requested:%u max:%u\n",
634 parms->rx_num_flows_in_k * TF_KILOBYTE,
635 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
639 /* must be a power-of-2 supported value
640 * in the range 32K - 128M
642 cnt = TF_EM_MIN_ENTRIES;
643 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
644 cnt <= TF_EM_MAX_ENTRIES)
647 if (cnt > TF_EM_MAX_ENTRIES) {
649 "EEM: Invalid number of Rx requested: %u\n",
650 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* Same derivation for the Tx direction. */
655 if (parms->tx_mem_size_in_mb != 0) {
656 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
657 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
659 uint32_t num_entries = (parms->tx_mem_size_in_mb *
660 (TF_KILOBYTE * TF_KILOBYTE)) /
663 if (num_entries < TF_EM_MIN_ENTRIES) {
/*
 * NOTE(review): Tx branch, but the log prints rx_mem_size_in_mb - should
 * presumably be tx_mem_size_in_mb.  Flagged only; not changed here.
 */
665 "EEM: Insufficient memory requested:%uMB\n",
666 parms->rx_mem_size_in_mb);
670 cnt = TF_EM_MIN_ENTRIES;
671 while (num_entries > cnt &&
672 cnt <= TF_EM_MAX_ENTRIES)
675 if (cnt > TF_EM_MAX_ENTRIES) {
677 "EEM: Invalid number of Tx requested: %u\n",
678 (parms->tx_num_flows_in_k * TF_KILOBYTE));
682 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
684 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
686 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
687 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
689 "EEM: Invalid number of Tx flows "
690 "requested:%u max:%u\n",
691 (parms->tx_num_flows_in_k * TF_KILOBYTE),
692 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
/* Tx flow count must also be a supported power of two in range. */
696 cnt = TF_EM_MIN_ENTRIES;
697 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
698 cnt <= TF_EM_MAX_ENTRIES)
701 if (cnt > TF_EM_MAX_ENTRIES) {
703 "EEM: Invalid number of Tx requested: %u\n",
704 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count requires at least one byte of key. */
709 if (parms->rx_num_flows_in_k != 0 &&
710 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
712 "EEM: Rx key size required: %u\n",
713 (parms->rx_max_key_sz_in_bits));
717 if (parms->tx_num_flows_in_k != 0 &&
718 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
720 "EEM: Tx key size required: %u\n",
721 (parms->tx_max_key_sz_in_bits));
/* Populate the per-direction table descriptors; EFC tables are unused (0). */
725 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
726 parms->rx_num_flows_in_k * TF_KILOBYTE;
727 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
728 parms->rx_max_key_sz_in_bits / 8;
730 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
731 parms->rx_num_flows_in_k * TF_KILOBYTE;
732 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
733 parms->rx_max_key_sz_in_bits / 8;
735 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
736 parms->rx_num_flows_in_k * TF_KILOBYTE;
737 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
738 parms->rx_max_action_entry_sz_in_bits / 8;
740 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
743 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
744 parms->tx_num_flows_in_k * TF_KILOBYTE;
745 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
746 parms->tx_max_key_sz_in_bits / 8;
748 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
749 parms->tx_num_flows_in_k * TF_KILOBYTE;
750 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
751 parms->tx_max_key_sz_in_bits / 8;
753 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
754 parms->tx_num_flows_in_k * TF_KILOBYTE;
755 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
756 parms->tx_max_action_entry_sz_in_bits / 8;
758 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
763 /** insert EEM entry API
767 * TF_ERR - unable to get lock
769 * insert callback returns:
771 * TF_ERR_EM_DUP - key is already in table
/*
 * Inserts an external EM (EEM) entry: hashes the key into two 32-bit
 * hashes (key0/key1), masks each down to a table index, builds the raw
 * 64-byte key entry, then attempts a hardware ADD op into the KEY0 table
 * and, if that fails, the KEY1 table.  On success the flow id/handle are
 * encoded with the table, index, and GFID via the TF_SET_* macros.
 */
774 tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
775 struct tf_insert_em_entry_parms *parms)
782 struct cfa_p4_eem_64b_entry key_entry;
784 enum hcapi_cfa_em_table_type table_type;
786 struct hcapi_cfa_hwop op;
787 struct hcapi_cfa_key_tbl key_tbl;
788 struct hcapi_cfa_key_data key_obj;
789 struct hcapi_cfa_key_loc key_loc;
793 /* Get mask to use on hash */
794 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
800 dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* 64-bit hash; top half indexes KEY0, bottom half indexes KEY1. */
803 big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
804 (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
805 key0_hash = (uint32_t)(big_hash >> 32);
806 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
808 key0_index = key0_hash & mask;
809 key1_index = key1_hash & mask;
812 TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
813 TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
816 * Use the "result" arg to populate all of the key entry then
817 * store the byte swapped "raw" entry in a local copy ready
818 * for insertion in to the table.
820 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
821 ((uint8_t *)parms->key),
825 * Try to add to Key0 table, if that does not work then
826 * try the key1 table.
829 op.opcode = HCAPI_CFA_HWOPS_ADD;
830 key_tbl.base0 = (uint8_t *)
831 &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
/* Byte offset within a page for this index. */
832 key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
833 key_obj.data = (uint8_t *)&key_entry;
834 key_obj.size = TF_EM_KEY_RECORD_SIZE;
836 rc = hcapi_cfa_key_hw_op(&op,
842 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed (e.g. bucket occupied) - retry against KEY1. */
846 key_tbl.base0 = (uint8_t *)
847 &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
849 (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
851 rc = hcapi_cfa_key_hw_op(&op,
858 table_type = TF_KEY1_TABLE;
/* Encode the location of the new entry into the caller's flow id/handle. */
864 TF_SET_FLOW_ID(parms->flow_id,
866 TF_GFID_TABLE_EXTERNAL,
868 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
879 /** delete EEM hash entry API
883 * -EINVAL - parameter error
884 * TF_NO_SESSION - bad session ID
885 * TF_ERR_TBL_SCOPE - invalid table scope
886 * TF_ERR_TBL_IF - invalid table interface
888 * insert callback returns
890 * TF_NO_EM_MATCH - entry not found
/*
 * Deletes an EEM entry previously inserted by tf_insert_eem_entry():
 * decodes the hash table (KEY0 vs KEY1) and index out of the flow handle,
 * then issues a hardware DEL op at the corresponding page offset.  A zero
 * flow handle is rejected up front.
 */
893 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
894 struct tf_delete_em_entry_parms *parms)
896 enum hcapi_cfa_em_table_type hash_type;
898 struct hcapi_cfa_hwop op;
899 struct hcapi_cfa_key_tbl key_tbl;
900 struct hcapi_cfa_key_data key_obj;
901 struct hcapi_cfa_key_loc key_loc;
904 if (parms->flow_handle == 0)
/* Recover which key table and which slot the entry lives in. */
907 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
908 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
910 op.opcode = HCAPI_CFA_HWOPS_DEL;
911 key_tbl.base0 = (uint8_t *)
912 &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[(hash_type == 0 ?
915 key_obj.offset = (index * TF_EM_KEY_RECORD_SIZE) % TF_EM_PAGE_SIZE;
917 key_obj.size = TF_EM_KEY_RECORD_SIZE;
919 rc = hcapi_cfa_key_hw_op(&op,
930 /** insert EM hash entry API
/*
 * Public wrapper: resolves the table scope control block from the session
 * by tbl_scope_id and delegates the actual insert to
 * tf_insert_eem_entry().  Errors out if the scope cannot be found.
 */
937 tf_em_insert_ext_entry(struct tf *tfp,
938 struct tf_insert_em_entry_parms *parms)
940 struct tf_tbl_scope_cb *tbl_scope_cb;
943 tbl_scope_cb_find((struct tf_session *)(tfp->session->core_data),
944 parms->tbl_scope_id);
945 if (tbl_scope_cb == NULL) {
946 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
950 return tf_insert_eem_entry(tbl_scope_cb, parms);
953 /** Delete EM hash entry API
/*
 * Public wrapper mirroring tf_em_insert_ext_entry(): looks up the table
 * scope control block for parms->tbl_scope_id and delegates deletion to
 * tf_delete_eem_entry().
 */
960 tf_em_delete_ext_entry(struct tf *tfp,
961 struct tf_delete_em_entry_parms *parms)
963 struct tf_tbl_scope_cb *tbl_scope_cb;
966 tbl_scope_cb_find((struct tf_session *)(tfp->session->core_data),
967 parms->tbl_scope_id);
968 if (tbl_scope_cb == NULL) {
969 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
973 return tf_delete_eem_entry(tbl_scope_cb, parms);
/*
 * Allocates a host-backed external EM table scope: reserves a table-scope
 * id from the RM pool, queries firmware EEM capabilities per direction,
 * validates/derives the table sizes, registers the EM context (page
 * tables + firmware registration), configures and enables EEM in
 * firmware, and creates the external-record offset pool.  The visible
 * error tails unwind via tf_em_ext_host_free() and tf_rm_free().
 * NOTE(review): the "TYPE TABLE-SCOPE???" db_index and the
 * TF_HACK_TBL_SCOPE_BASE offset are pre-existing workarounds in this
 * code - left as-is.
 */
977 tf_em_ext_host_alloc(struct tf *tfp,
978 struct tf_alloc_tbl_scope_parms *parms)
982 struct tf_tbl_scope_cb *tbl_scope_cb;
983 struct hcapi_cfa_em_table *em_tables;
984 struct tf_session *session;
985 struct tf_free_tbl_scope_parms free_parms;
986 struct tf_rm_allocate_parms aparms = { 0 };
987 struct tf_rm_free_parms fparms = { 0 };
989 session = (struct tf_session *)tfp->session->core_data;
991 /* Get Table Scope control block from the session pool */
992 aparms.rm_db = eem_db[TF_DIR_RX];
993 aparms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
994 aparms.index = (uint32_t *)&parms->tbl_scope_id;
995 rc = tf_rm_allocate(&aparms);
998 "Failed to allocate table scope\n");
/* RM ids are offset by TF_HACK_TBL_SCOPE_BASE; normalize to a 0-based index. */
1002 parms->tbl_scope_id -= TF_HACK_TBL_SCOPE_BASE;
1003 tbl_scope_cb = &session->tbl_scopes[parms->tbl_scope_id];
1004 tbl_scope_cb->index = parms->tbl_scope_id;
1005 tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;
/* Query firmware EEM capabilities for both directions up front. */
1007 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1008 rc = tf_msg_em_qcaps(tfp,
1010 &tbl_scope_cb->em_caps[dir]);
1013 "EEM: Unable to query for EEM capability,"
1021 * Validate and setup table sizes
1023 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1026 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1028 * Allocate tables and signal configuration to FW
1030 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1033 "EEM: Unable to register for EEM ctx,"
/* Hand the registered context ids to firmware for this direction. */
1039 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
1040 rc = tf_msg_em_cfg(tfp,
1041 em_tables[TF_KEY0_TABLE].num_entries,
1042 em_tables[TF_KEY0_TABLE].ctx_id,
1043 em_tables[TF_KEY1_TABLE].ctx_id,
1044 em_tables[TF_RECORD_TABLE].ctx_id,
1045 em_tables[TF_EFC_TABLE].ctx_id,
1046 parms->hw_flow_cache_flush_timer,
1050 "TBL: Unable to configure EEM in firmware"
1056 rc = tf_msg_em_op(tfp,
1058 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1062 "EEM: Unable to enable EEM in firmware"
1068 /* Allocate the pool of offsets of the external memory.
1069 * Initially, this is a single fixed size pool for all external
1070 * actions related to a single table scope.
1072 rc = tf_create_tbl_pool_external(dir,
1074 em_tables[TF_RECORD_TABLE].num_entries,
1075 em_tables[TF_RECORD_TABLE].entry_size);
1078 "%s TBL: Unable to allocate idx pools %s\n",
/* Error path: free the whole scope (contexts, pools, firmware state). */
1088 free_parms.tbl_scope_id = parms->tbl_scope_id;
1089 tf_em_ext_host_free(tfp, &free_parms);
1093 /* Free Table control block */
1094 fparms.rm_db = eem_db[TF_DIR_RX];
1095 fparms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
1096 fparms.index = parms->tbl_scope_id + TF_HACK_TBL_SCOPE_BASE;
1097 tf_rm_free(&fparms);
/*
 * Frees a host-backed external EM table scope: returns the table-scope id
 * to the RM pool, then per direction destroys the external offset pool,
 * disables EEM in firmware (HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE)
 * and unregisters the EM context.
 * NOTE(review): the function continues past the end of this chunk - the
 * remainder is not visible here.
 */
1102 tf_em_ext_host_free(struct tf *tfp,
1103 struct tf_free_tbl_scope_parms *parms)
1107 struct tf_tbl_scope_cb *tbl_scope_cb;
1108 struct tf_session *session;
1109 struct tf_rm_free_parms aparms = { 0 };
1111 session = (struct tf_session *)(tfp->session->core_data);
1113 tbl_scope_cb = tbl_scope_cb_find(session,
1114 parms->tbl_scope_id);
1116 if (tbl_scope_cb == NULL) {
1117 TFP_DRV_LOG(ERR, "Table scope error\n");
1121 /* Free Table control block */
1122 aparms.rm_db = eem_db[TF_DIR_RX];
1123 aparms.db_index = 1/**** TYPE TABLE-SCOPE??? ****/;
1124 aparms.index = parms->tbl_scope_id + TF_HACK_TBL_SCOPE_BASE;
1125 rc = tf_rm_free(&aparms);
1128 "Failed to free table scope\n");
1131 /* free table scope locks */
1132 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1133 /* Free associated external pools
1135 tf_destroy_tbl_pool_external(dir,
1139 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1141 /* free table scope and all associated resources */
1142 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);