1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Broadcom
9 #include <rte_common.h>
10 #include <rte_errno.h>
15 #include "tf_common.h"
17 #include "tf_em_common.h"
21 #include "tf_ext_flow_handle.h"
26 #define PTU_PTE_VALID 0x1UL
27 #define PTU_PTE_LAST 0x2UL
28 #define PTU_PTE_NEXT_TO_LAST 0x4UL
30 /* Number of pointers per page_size */
31 #define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
33 #define TF_EM_PG_SZ_4K (1 << 12)
34 #define TF_EM_PG_SZ_8K (1 << 13)
35 #define TF_EM_PG_SZ_64K (1 << 16)
36 #define TF_EM_PG_SZ_256K (1 << 18)
37 #define TF_EM_PG_SZ_1M (1 << 20)
38 #define TF_EM_PG_SZ_2M (1 << 21)
39 #define TF_EM_PG_SZ_4M (1 << 22)
40 #define TF_EM_PG_SZ_1G (1 << 30)
42 #define TF_EM_CTX_ID_INVALID 0xFFFF
44 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
45 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
50 extern void *eem_db[TF_DIR_MAX];
52 extern struct tf_tbl_scope_cb tbl_scopes[TF_NUM_TBL_SCOPE];
55 * Function to free a page table
58 * Pointer to the page table to free
61 tf_em_free_pg_tbl(struct hcapi_cfa_em_page_tbl *tp)
/* Walk every data page in this level: warn (and skip the free) when a
 * VA slot has no mapping; otherwise release the page and NULL the slot
 * to guard against a later double-free.
 */
65 for (i = 0; i < tp->pg_count; i++) {
66 if (!tp->pg_va_tbl[i]) {
68 "No mapping for page: %d table: %016" PRIu64 "\n",
70 (uint64_t)(uintptr_t)tp);
74 tfp_free(tp->pg_va_tbl[i]);
75 tp->pg_va_tbl[i] = NULL;
/* Finally release the VA and PA pointer arrays themselves.
 * NOTE(review): the table pointer above is logged with PRIu64 (decimal);
 * PRIx64 would be the conventional format for an address — confirm.
 */
79 tfp_free(tp->pg_va_tbl);
81 tfp_free(tp->pg_pa_tbl);
86 * Function to free an EM table
89 * Pointer to the EM table to free
92 tf_em_free_page_table(struct hcapi_cfa_em_table *tbl)
94 struct hcapi_cfa_em_page_tbl *tp;
/* Free the page-table pages at every level of this EM table, then
 * reset the cached level-0 DMA address and data-page count so the
 * table reads as empty.
 */
97 for (i = 0; i < tbl->num_lvl; i++) {
100 "EEM: Freeing page table: size %u lvl %d cnt %u\n",
105 tf_em_free_pg_tbl(tp);
109 tbl->l0_dma_addr = 0;
111 tbl->num_data_pages = 0;
115 * Allocation of page tables
118 * Pointer to a TruFlow handle
121 * Page count to allocate
128 * -ENOMEM - Out of memory
131 tf_em_alloc_pg_tbl(struct hcapi_cfa_em_page_tbl *tp,
136 struct tfp_calloc_parms parms;
/* First allocate the array of virtual-address page pointers. */
138 parms.nitems = pg_count;
139 parms.size = sizeof(void *);
142 if (tfp_calloc(&parms) != 0)
145 tp->pg_va_tbl = parms.mem_va;
/* Then the matching physical-address array; on failure release the
 * VA array allocated above before bailing out.
 */
147 if (tfp_calloc(&parms) != 0) {
148 tfp_free(tp->pg_va_tbl);
152 tp->pg_pa_tbl = parms.mem_va;
155 tp->pg_size = pg_size;
/* Allocate each data page with the EM page alignment, recording both
 * the DMA (PA) and CPU (VA) address, and zero the page contents.
 */
157 for (i = 0; i < pg_count; i++) {
159 parms.size = pg_size;
160 parms.alignment = TF_EM_PAGE_ALIGNMENT;
162 if (tfp_calloc(&parms) != 0)
165 tp->pg_pa_tbl[i] = (uintptr_t)parms.mem_pa;
166 tp->pg_va_tbl[i] = parms.mem_va;
168 memset(tp->pg_va_tbl[i], 0, pg_size);
/* Error path: unwind any pages/arrays already allocated. */
175 tf_em_free_pg_tbl(tp);
180 * Allocates EM page tables
183 * Table to allocate pages for
187 * -ENOMEM - Out of memory
190 tf_em_alloc_page_table(struct hcapi_cfa_em_table *tbl)
192 struct hcapi_cfa_em_page_tbl *tp;
/* Allocate the page-table pages for every level that sizing
 * (tbl->num_lvl / tbl->page_cnt) determined this table needs.
 */
197 for (i = 0; i < tbl->num_lvl; i++) {
198 tp = &tbl->pg_tbl[i];
200 rc = tf_em_alloc_pg_tbl(tp,
205 "Failed to allocate page table: lvl: %d, rc:%s\n",
/* Debug dump of every page's VA/PA pair at this level. */
211 for (j = 0; j < tp->pg_count; j++) {
213 "EEM: Allocated page table: size %u lvl %d cnt"
218 (void *)(uintptr_t)tp->pg_va_tbl[j],
219 (void *)(uintptr_t)tp->pg_pa_tbl[j]);
/* On any level failing, free everything allocated so far. */
225 tf_em_free_page_table(tbl);
230 * Links EM page tables
233 * Pointer to page table
236 * Pointer to the next page table
239 * Flag controlling if the page table is last
242 tf_em_link_page_table(struct hcapi_cfa_em_page_tbl *tp,
243 struct hcapi_cfa_em_page_tbl *tp_next,
246 uint64_t *pg_pa = tp_next->pg_pa_tbl;
/* Fill every PTE slot in this level's pages with the physical address
 * of the corresponding next-level page, OR'ing in the PTU valid bit.
 * When this is the last linking level (set_pte_last), the final two
 * next-level pages are additionally tagged NEXT_TO_LAST and LAST so
 * the hardware can detect the end of the chain. 'k' indexes the
 * next-level page across all of this level's pages, wrapping when the
 * next level runs out.
 */
253 for (i = 0; i < tp->pg_count; i++) {
254 pg_va = tp->pg_va_tbl[i];
256 for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
257 if (k == tp_next->pg_count - 2 && set_pte_last)
258 valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
259 else if (k == tp_next->pg_count - 1 && set_pte_last)
260 valid = PTU_PTE_LAST | PTU_PTE_VALID;
262 valid = PTU_PTE_VALID;
/* PTEs are stored little-endian as required by the device. */
264 pg_va[j] = tfp_cpu_to_le_64(pg_pa[k] | valid);
265 if (++k >= tp_next->pg_count)
272 * Setup a EM page table
275 * Pointer to EM page table
278 tf_em_setup_page_table(struct hcapi_cfa_em_table *tbl)
280 struct hcapi_cfa_em_page_tbl *tp_next;
281 struct hcapi_cfa_em_page_tbl *tp;
282 bool set_pte_last = 0;
/* Link each level to the one below it; only the second-to-last level
 * sets the LAST/NEXT_TO_LAST PTE markers (set_pte_last) since its
 * children are the data pages.
 */
285 for (i = 0; i < tbl->num_lvl - 1; i++) {
286 tp = &tbl->pg_tbl[i];
287 tp_next = &tbl->pg_tbl[i + 1];
288 if (i == tbl->num_lvl - 2)
290 tf_em_link_page_table(tp, tp_next, set_pte_last);
/* Cache the level-0 root page VA/DMA address for firmware config. */
293 tbl->l0_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_va_tbl[0];
294 tbl->l0_dma_addr = tbl->pg_tbl[TF_PT_LVL_0].pg_pa_tbl[0];
298 * Given the page size, size of each data item (entry size),
299 * and the total number of entries needed, determine the number
300 * of page table levels and the number of data pages required.
309 * Number of entries needed
311 * [out] num_data_pages
312 * Number of pages required
315 * Success - Number of EM page levels required
316 * -ENOMEM - Out of memory
319 tf_em_size_page_tbl_lvl(uint32_t page_size,
321 uint32_t num_entries,
322 uint64_t *num_data_pages)
324 uint64_t lvl_data_size = page_size;
325 int lvl = TF_PT_LVL_0;
/* Total bytes of data the table must hold. */
329 data_size = (uint64_t)num_entries * entry_size;
/* Grow the addressable size level by level: level 1 covers
 * MAX_PAGE_PTRS pages, level 2 covers MAX_PAGE_PTRS^2 pages.
 */
331 while (lvl_data_size < data_size) {
334 if (lvl == TF_PT_LVL_1)
335 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
337 else if (lvl == TF_PT_LVL_2)
338 lvl_data_size = (uint64_t)MAX_PAGE_PTRS(page_size) *
339 MAX_PAGE_PTRS(page_size) * page_size;
/* Data pages = ceil(data_size / page_size). */
344 *num_data_pages = roundup(data_size, page_size) / page_size;
350 * Return the number of page table pages needed to
351 * reference the given number of next level pages.
357 * Size of each EM page
360 * Number of EM page table pages
363 tf_em_page_tbl_pgcnt(uint32_t num_pages,
/* ceil(num_pages / pointers-per-page): one PTE per next-level page. */
366 return roundup(num_pages, MAX_PAGE_PTRS(page_size)) /
367 MAX_PAGE_PTRS(page_size);
372 * Given the number of data pages, page_size and the maximum
373 * number of page table levels (already determined), size
374 * the number of page table pages required at each level.
377 * Max number of levels
379 * [in] num_data_pages
380 * Number of EM data pages
389 tf_em_size_page_tbls(int max_lvl,
390 uint64_t num_data_pages,
/* The deepest level holds the data pages directly; each level above
 * needs just enough pages of PTEs to reference the level below it.
 */
394 if (max_lvl == TF_PT_LVL_0) {
395 page_cnt[TF_PT_LVL_0] = num_data_pages;
396 } else if (max_lvl == TF_PT_LVL_1) {
397 page_cnt[TF_PT_LVL_1] = num_data_pages;
398 page_cnt[TF_PT_LVL_0] =
399 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
400 } else if (max_lvl == TF_PT_LVL_2) {
401 page_cnt[TF_PT_LVL_2] = num_data_pages;
402 page_cnt[TF_PT_LVL_1] =
403 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_2], page_size);
404 page_cnt[TF_PT_LVL_0] =
405 tf_em_page_tbl_pgcnt(page_cnt[TF_PT_LVL_1], page_size);
412 * Size the EM table based on capabilities
419 * - EINVAL - Parameter error
420 * - ENOMEM - Out of memory
423 tf_em_size_table(struct hcapi_cfa_em_table *tbl)
425 uint64_t num_data_pages;
428 uint32_t num_entries;
429 uint32_t cnt = TF_EM_MIN_ENTRIES;
431 /* Ignore entry if both size and number are zero */
432 if (!tbl->entry_size && !tbl->num_entries)
435 /* If only one is set then error */
436 if (!tbl->entry_size || !tbl->num_entries)
439 /* Determine number of page table levels and the number
440 * of data pages needed to process the given eem table.
442 if (tbl->type == TF_RECORD_TABLE) {
444 * For action records just a memory size is provided. Work
445 * backwards to resolve to number of entries
447 num_entries = tbl->num_entries / tbl->entry_size;
/* Round the entry count up to the next supported power-of-2
 * value, clamped to the [MIN, MAX] supported range.
 */
448 if (num_entries < TF_EM_MIN_ENTRIES) {
449 num_entries = TF_EM_MIN_ENTRIES;
451 while (num_entries > cnt && cnt <= TF_EM_MAX_ENTRIES)
456 num_entries = tbl->num_entries;
/* Size the level structure for the fixed EM page size. */
459 max_lvl = tf_em_size_page_tbl_lvl(TF_EM_PAGE_SIZE,
464 TFP_DRV_LOG(WARNING, "EEM: Failed to size page table levels\n");
466 "table: %d data-sz: %016" PRIu64 " page-sz: %u\n",
467 tbl->type, (uint64_t)num_entries * tbl->entry_size,
/* Record the result and size each level's page counts. */
472 tbl->num_lvl = max_lvl + 1;
473 tbl->num_data_pages = num_data_pages;
475 /* Determine the number of pages needed at each level */
476 page_cnt = tbl->page_cnt;
477 memset(page_cnt, 0, sizeof(tbl->page_cnt));
478 tf_em_size_page_tbls(max_lvl, num_data_pages, TF_EM_PAGE_SIZE,
481 TFP_DRV_LOG(INFO, "EEM: Sized page table: %d\n", tbl->type);
483 "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64 " l0: %u l1: %u l2: %u\n",
485 (uint64_t)num_data_pages * TF_EM_PAGE_SIZE,
487 page_cnt[TF_PT_LVL_0],
488 page_cnt[TF_PT_LVL_1],
489 page_cnt[TF_PT_LVL_2]);
495 * Unregisters EM Ctx in Firmware
498 * Pointer to a TruFlow handle
501 * Pointer to a table scope control block
504 * Receive or transmit direction
507 tf_em_ctx_unreg(struct tf *tfp,
508 struct tf_tbl_scope_cb *tbl_scope_cb,
511 struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
512 struct hcapi_cfa_em_table *tbl;
/* For every configured table (non-zero size AND count), unregister
 * its memory context with firmware and free its page tables.
 * NOTE(review): the tf_msg_em_mem_unrgtr() return value appears to be
 * ignored here — intentional best-effort teardown? Confirm.
 */
515 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
516 tbl = &ctxp->em_tables[i];
518 if (tbl->num_entries != 0 && tbl->entry_size != 0) {
519 tf_msg_em_mem_unrgtr(tfp, &tbl->ctx_id);
520 tf_em_free_page_table(tbl);
526 * Registers EM Ctx in Firmware
529 * Pointer to a TruFlow handle
532 * Pointer to a table scope control block
535 * Receive or transmit direction
539 * -ENOMEM - Out of Memory
542 tf_em_ctx_reg(struct tf *tfp,
543 struct tf_tbl_scope_cb *tbl_scope_cb,
546 struct hcapi_cfa_em_ctx_mem_info *ctxp = &tbl_scope_cb->em_ctx_info[dir];
547 struct hcapi_cfa_em_table *tbl;
/* For each configured table: size it, allocate and link its page
 * tables, then register the memory with firmware to obtain a ctx id.
 */
551 for (i = TF_KEY0_TABLE; i < TF_MAX_TABLE; i++) {
552 tbl = &ctxp->em_tables[i];
554 if (tbl->num_entries && tbl->entry_size) {
555 rc = tf_em_size_table(tbl);
560 rc = tf_em_alloc_page_table(tbl);
564 tf_em_setup_page_table(tbl);
565 rc = tf_msg_em_mem_rgtr(tfp,
567 TF_EM_PAGE_SIZE_ENUM,
/* Error path: unwind every table registered so far. */
577 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
583 * Validates EM number of entries requested
586 * Pointer to table scope control block to be populated
589 * Pointer to input parameters
593 * -EINVAL - Parameter error
596 tf_em_validate_num_entries(struct tf_tbl_scope_cb *tbl_scope_cb,
597 struct tf_alloc_tbl_scope_parms *parms)
/* Rx path: when a memory budget (MB) is given, derive the flow count
 * from it. key_b = two key-table entries per flow; action_b = one
 * action record per flow.
 */
601 if (parms->rx_mem_size_in_mb != 0) {
602 uint32_t key_b = 2 * ((parms->rx_max_key_sz_in_bits / 8) + 1);
603 uint32_t action_b = ((parms->rx_max_action_entry_sz_in_bits / 8)
605 uint32_t num_entries = (parms->rx_mem_size_in_mb *
606 TF_MEGABYTE) / (key_b + action_b);
608 if (num_entries < TF_EM_MIN_ENTRIES) {
609 TFP_DRV_LOG(ERR, "EEM: Insufficient memory requested:"
611 parms->rx_mem_size_in_mb);
/* Round up to the next supported power-of-2 count. */
615 cnt = TF_EM_MIN_ENTRIES;
616 while (num_entries > cnt &&
617 cnt <= TF_EM_MAX_ENTRIES)
/* NOTE(review): this is the Rx branch but the log below says
 * "Tx requested" and prints tx_num_flows_in_k — looks like a
 * copy/paste slip; should reference the Rx values. Confirm.
 */
620 if (cnt > TF_EM_MAX_ENTRIES) {
621 TFP_DRV_LOG(ERR, "EEM: Invalid number of Tx requested: "
623 (parms->tx_num_flows_in_k * TF_KILOBYTE));
627 parms->rx_num_flows_in_k = cnt / TF_KILOBYTE;
/* Rx path: flow count given directly — check it against the
 * firmware-reported supported range.
 */
629 if ((parms->rx_num_flows_in_k * TF_KILOBYTE) <
631 (parms->rx_num_flows_in_k * TF_KILOBYTE) >
632 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported) {
634 "EEM: Invalid number of Rx flows "
635 "requested:%u max:%u\n",
636 parms->rx_num_flows_in_k * TF_KILOBYTE,
637 tbl_scope_cb->em_caps[TF_DIR_RX].max_entries_supported);
641 /* must be a power-of-2 supported value
642 * in the range 32K - 128M
644 cnt = TF_EM_MIN_ENTRIES;
645 while ((parms->rx_num_flows_in_k * TF_KILOBYTE) != cnt &&
646 cnt <= TF_EM_MAX_ENTRIES)
649 if (cnt > TF_EM_MAX_ENTRIES) {
651 "EEM: Invalid number of Rx requested: %u\n",
652 (parms->rx_num_flows_in_k * TF_KILOBYTE));
/* Tx path: mirror of the Rx budget-derived sizing above. */
657 if (parms->tx_mem_size_in_mb != 0) {
658 uint32_t key_b = 2 * (parms->tx_max_key_sz_in_bits / 8 + 1);
659 uint32_t action_b = ((parms->tx_max_action_entry_sz_in_bits / 8)
661 uint32_t num_entries = (parms->tx_mem_size_in_mb *
662 (TF_KILOBYTE * TF_KILOBYTE)) /
/* NOTE(review): Tx branch, but the log below prints
 * rx_mem_size_in_mb — apparent copy/paste slip; confirm it
 * should be tx_mem_size_in_mb.
 */
665 if (num_entries < TF_EM_MIN_ENTRIES) {
667 "EEM: Insufficient memory requested:%uMB\n",
668 parms->rx_mem_size_in_mb);
672 cnt = TF_EM_MIN_ENTRIES;
673 while (num_entries > cnt &&
674 cnt <= TF_EM_MAX_ENTRIES)
677 if (cnt > TF_EM_MAX_ENTRIES) {
679 "EEM: Invalid number of Tx requested: %u\n",
680 (parms->tx_num_flows_in_k * TF_KILOBYTE));
684 parms->tx_num_flows_in_k = cnt / TF_KILOBYTE;
686 if ((parms->tx_num_flows_in_k * TF_KILOBYTE) <
688 (parms->tx_num_flows_in_k * TF_KILOBYTE) >
689 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported) {
691 "EEM: Invalid number of Tx flows "
692 "requested:%u max:%u\n",
693 (parms->tx_num_flows_in_k * TF_KILOBYTE),
694 tbl_scope_cb->em_caps[TF_DIR_TX].max_entries_supported);
698 cnt = TF_EM_MIN_ENTRIES;
699 while ((parms->tx_num_flows_in_k * TF_KILOBYTE) != cnt &&
700 cnt <= TF_EM_MAX_ENTRIES)
703 if (cnt > TF_EM_MAX_ENTRIES) {
705 "EEM: Invalid number of Tx requested: %u\n",
706 (parms->tx_num_flows_in_k * TF_KILOBYTE));
/* A non-zero flow count requires a key at least one byte wide. */
711 if (parms->rx_num_flows_in_k != 0 &&
712 (parms->rx_max_key_sz_in_bits / 8 == 0)) {
714 "EEM: Rx key size required: %u\n",
715 (parms->rx_max_key_sz_in_bits));
719 if (parms->tx_num_flows_in_k != 0 &&
720 (parms->tx_max_key_sz_in_bits / 8 == 0)) {
722 "EEM: Tx key size required: %u\n",
723 (parms->tx_max_key_sz_in_bits));
/* Populate the per-direction KEY0/KEY1/RECORD table geometry; the
 * EFC table is unused (zero entries) in this configuration.
 */
727 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].num_entries =
728 parms->rx_num_flows_in_k * TF_KILOBYTE;
729 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY0_TABLE].entry_size =
730 parms->rx_max_key_sz_in_bits / 8;
732 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].num_entries =
733 parms->rx_num_flows_in_k * TF_KILOBYTE;
734 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_KEY1_TABLE].entry_size =
735 parms->rx_max_key_sz_in_bits / 8;
737 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].num_entries =
738 parms->rx_num_flows_in_k * TF_KILOBYTE;
739 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_RECORD_TABLE].entry_size =
740 parms->rx_max_action_entry_sz_in_bits / 8;
742 tbl_scope_cb->em_ctx_info[TF_DIR_RX].em_tables[TF_EFC_TABLE].num_entries = 0;
745 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].num_entries =
746 parms->tx_num_flows_in_k * TF_KILOBYTE;
747 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY0_TABLE].entry_size =
748 parms->tx_max_key_sz_in_bits / 8;
750 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].num_entries =
751 parms->tx_num_flows_in_k * TF_KILOBYTE;
752 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_KEY1_TABLE].entry_size =
753 parms->tx_max_key_sz_in_bits / 8;
755 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].num_entries =
756 parms->tx_num_flows_in_k * TF_KILOBYTE;
757 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_RECORD_TABLE].entry_size =
758 parms->tx_max_action_entry_sz_in_bits / 8;
760 tbl_scope_cb->em_ctx_info[TF_DIR_TX].em_tables[TF_EFC_TABLE].num_entries = 0;
765 /** insert EEM entry API
769 * TF_ERR - unable to get lock
771 * insert callback returns:
773 * TF_ERR_EM_DUP - key is already in table
776 tf_insert_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
777 struct tf_insert_em_entry_parms *parms)
784 struct cfa_p4_eem_64b_entry key_entry;
786 enum hcapi_cfa_em_table_type table_type;
788 struct hcapi_cfa_hwop op;
789 struct hcapi_cfa_key_tbl key_tbl;
790 struct hcapi_cfa_key_data key_obj;
791 struct hcapi_cfa_key_loc key_loc;
795 /* Get mask to use on hash */
796 mask = tf_em_get_key_mask(tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE].num_entries);
802 dump_raw((uint8_t *)parms->key, TF_HW_EM_KEY_MAX_SIZE + 4, "In Key");
/* Hash the key once; the upper 32 bits index the KEY0 table and the
 * lower 32 bits index the KEY1 table (two candidate buckets).
 */
805 big_hash = hcapi_cfa_key_hash((uint64_t *)parms->key,
806 (TF_HW_EM_KEY_MAX_SIZE + 4) * 8);
807 key0_hash = (uint32_t)(big_hash >> 32);
808 key1_hash = (uint32_t)(big_hash & 0xFFFFFFFF);
810 key0_index = key0_hash & mask;
811 key1_index = key1_hash & mask;
814 TFP_DRV_LOG(DEBUG, "Key0 hash:0x%08x\n", key0_hash);
815 TFP_DRV_LOG(DEBUG, "Key1 hash:0x%08x\n", key1_hash);
818 * Use the "result" arg to populate all of the key entry then
819 * store the byte swapped "raw" entry in a local copy ready
820 * for insertion in to the table.
822 tf_em_create_key_entry((struct cfa_p4_eem_entry_hdr *)parms->em_record,
823 ((uint8_t *)parms->key),
827 * Try to add to Key0 table, if that does not work then
828 * try the key1 table.
831 op.opcode = HCAPI_CFA_HWOPS_ADD;
832 key_tbl.base0 = (uint8_t *)
833 &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY0_TABLE];
834 key_tbl.page_size = TF_EM_PAGE_SIZE;
835 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
836 key_obj.data = (uint8_t *)&key_entry;
837 key_obj.size = TF_EM_KEY_RECORD_SIZE;
839 rc = hcapi_cfa_key_hw_op(&op,
845 table_type = TF_KEY0_TABLE;
/* KEY0 insert failed (bucket occupied) — retry against KEY1. */
849 key_tbl.base0 = (uint8_t *)
850 &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_KEY1_TABLE];
851 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
853 rc = hcapi_cfa_key_hw_op(&op,
860 table_type = TF_KEY1_TABLE;
/* Encode which table/index the entry landed in into the caller's
 * flow id and flow handle for later delete/lookup.
 */
866 TF_SET_FLOW_ID(parms->flow_id,
868 TF_GFID_TABLE_EXTERNAL,
870 TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle,
881 /** delete EEM hash entry API
885 * -EINVAL - parameter error
886 * TF_NO_SESSION - bad session ID
887 * TF_ERR_TBL_SCOPE - invalid table scope
888 * TF_ERR_TBL_IF - invalid table interface
890 * insert callback returns
892 * TF_NO_EM_MATCH - entry not found
895 tf_delete_eem_entry(struct tf_tbl_scope_cb *tbl_scope_cb,
896 struct tf_delete_em_entry_parms *parms)
898 enum hcapi_cfa_em_table_type hash_type;
900 struct hcapi_cfa_hwop op;
901 struct hcapi_cfa_key_tbl key_tbl;
902 struct hcapi_cfa_key_data key_obj;
903 struct hcapi_cfa_key_loc key_loc;
/* A zero flow handle was never returned by insert — reject it. */
906 if (parms->flow_handle == 0)
/* Recover which key table (KEY0 vs KEY1) and which index the entry
 * was stored at from the flow handle produced at insert time.
 */
909 TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(parms->flow_handle, hash_type);
910 TF_GET_INDEX_FROM_FLOW_HANDLE(parms->flow_handle, index);
912 op.opcode = HCAPI_CFA_HWOPS_DEL;
913 key_tbl.base0 = (uint8_t *)
914 &tbl_scope_cb->em_ctx_info[parms->dir].em_tables[(hash_type == 0 ?
917 key_tbl.page_size = TF_EM_PAGE_SIZE;
918 key_obj.offset = index * TF_EM_KEY_RECORD_SIZE;
920 key_obj.size = TF_EM_KEY_RECORD_SIZE;
922 rc = hcapi_cfa_key_hw_op(&op,
933 /** insert EM hash entry API
/* Thin wrapper: resolve the table scope control block from the id in
 * parms, then delegate to tf_insert_eem_entry(). The tf handle is
 * unused on this (host) path.
 */
940 tf_em_insert_ext_entry(struct tf *tfp __rte_unused,
941 struct tf_insert_em_entry_parms *parms)
943 struct tf_tbl_scope_cb *tbl_scope_cb;
945 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
946 if (tbl_scope_cb == NULL) {
947 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
951 return tf_insert_eem_entry(tbl_scope_cb, parms);
954 /** Delete EM hash entry API
/* Thin wrapper: resolve the table scope control block from the id in
 * parms, then delegate to tf_delete_eem_entry(). The tf handle is
 * unused on this (host) path.
 */
961 tf_em_delete_ext_entry(struct tf *tfp __rte_unused,
962 struct tf_delete_em_entry_parms *parms)
964 struct tf_tbl_scope_cb *tbl_scope_cb;
966 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
967 if (tbl_scope_cb == NULL) {
968 TFP_DRV_LOG(ERR, "Invalid tbl_scope_cb\n");
972 return tf_delete_eem_entry(tbl_scope_cb, parms);
/* Allocate and configure a host-backed external EM table scope:
 * reserve a scope id via the RM, query per-direction capabilities,
 * size/allocate/register the EM tables, push the configuration to
 * firmware, enable EEM, and build the external action pools.
 */
976 tf_em_ext_host_alloc(struct tf *tfp,
977 struct tf_alloc_tbl_scope_parms *parms)
981 struct tf_tbl_scope_cb *tbl_scope_cb;
982 struct hcapi_cfa_em_table *em_tables;
983 struct tf_free_tbl_scope_parms free_parms;
984 struct tf_rm_allocate_parms aparms = { 0 };
985 struct tf_rm_free_parms fparms = { 0 };
987 /* Get Table Scope control block from the session pool */
988 aparms.rm_db = eem_db[TF_DIR_RX];
989 aparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
990 aparms.index = (uint32_t *)&parms->tbl_scope_id;
991 rc = tf_rm_allocate(&aparms);
994 "Failed to allocate table scope\n");
998 tbl_scope_cb = &tbl_scopes[parms->tbl_scope_id];
999 tbl_scope_cb->index = parms->tbl_scope_id;
1000 tbl_scope_cb->tbl_scope_id = parms->tbl_scope_id;
/* Query firmware EEM capabilities for both directions. */
1002 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1003 rc = tf_msg_em_qcaps(tfp,
1005 &tbl_scope_cb->em_caps[dir]);
1008 "EEM: Unable to query for EEM capability,"
1016 * Validate and setup table sizes
1018 if (tf_em_validate_num_entries(tbl_scope_cb, parms))
1021 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1023 * Allocate tables and signal configuration to FW
1025 rc = tf_em_ctx_reg(tfp, tbl_scope_cb, dir);
1028 "EEM: Unable to register for EEM ctx,"
/* Hand the registered ctx ids for all four tables to FW. */
1034 em_tables = tbl_scope_cb->em_ctx_info[dir].em_tables;
1035 rc = tf_msg_em_cfg(tfp,
1036 em_tables[TF_KEY0_TABLE].num_entries,
1037 em_tables[TF_KEY0_TABLE].ctx_id,
1038 em_tables[TF_KEY1_TABLE].ctx_id,
1039 em_tables[TF_RECORD_TABLE].ctx_id,
1040 em_tables[TF_EFC_TABLE].ctx_id,
1041 parms->hw_flow_cache_flush_timer,
1045 "TBL: Unable to configure EEM in firmware"
1051 rc = tf_msg_em_op(tfp,
1053 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE);
1057 "EEM: Unable to enable EEM in firmware"
1063 /* Allocate the pool of offsets of the external memory.
1064 * Initially, this is a single fixed size pool for all external
1065 * actions related to a single table scope.
1067 rc = tf_create_tbl_pool_external(dir,
1069 em_tables[TF_RECORD_TABLE].num_entries,
1070 em_tables[TF_RECORD_TABLE].entry_size);
1073 "%s TBL: Unable to allocate idx pools %s\n",
/* Deep-failure cleanup: tear down the whole scope. */
1083 free_parms.tbl_scope_id = parms->tbl_scope_id;
1084 tf_em_ext_host_free(tfp, &free_parms);
1088 /* Free Table control block */
1089 fparms.rm_db = eem_db[TF_DIR_RX];
1090 fparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
1091 fparms.index = parms->tbl_scope_id;
1092 tf_rm_free(&fparms);
/* Free a host-backed external EM table scope: return the scope id to
 * the RM, destroy the per-direction external pools, disable EEM in
 * firmware, and unregister/free the EM context memory. Teardown is
 * best-effort: later steps run even if earlier ones fail.
 */
1097 tf_em_ext_host_free(struct tf *tfp,
1098 struct tf_free_tbl_scope_parms *parms)
1102 struct tf_tbl_scope_cb *tbl_scope_cb;
1103 struct tf_rm_free_parms aparms = { 0 };
1105 tbl_scope_cb = tbl_scope_cb_find(parms->tbl_scope_id);
1107 if (tbl_scope_cb == NULL) {
1108 TFP_DRV_LOG(ERR, "Table scope error\n");
1112 /* Free Table control block */
1113 aparms.rm_db = eem_db[TF_DIR_RX];
1114 aparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
1115 aparms.index = parms->tbl_scope_id;
1116 rc = tf_rm_free(&aparms);
1119 "Failed to free table scope\n");
1122 /* free table scope locks */
1123 for (dir = 0; dir < TF_DIR_MAX; dir++) {
1124 /* Free associated external pools
1126 tf_destroy_tbl_pool_external(dir,
1130 HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE);
1132 /* free table scope and all associated resources */
1133 tf_em_ctx_unreg(tfp, tbl_scope_cb, dir);
/* Mark the global slot invalid so tbl_scope_cb_find() misses it. */
1136 tbl_scopes[parms->tbl_scope_id].tbl_scope_id = TF_TBL_SCOPE_INVALID;
1141 * Sets the specified external table type element.
1143 * This API sets the specified element data
1146 * Pointer to TF handle
1149 * Pointer to table set parameters
1152 * - (0) if successful.
1153 * - (-EINVAL) on failure.
1155 int tf_tbl_ext_host_set(struct tf *tfp,
1156 struct tf_tbl_set_parms *parms)
1159 struct tf_tbl_scope_cb *tbl_scope_cb;
1160 uint32_t tbl_scope_id;
1161 struct hcapi_cfa_hwop op;
1162 struct hcapi_cfa_key_tbl key_tbl;
1163 struct hcapi_cfa_key_data key_obj;
1164 struct hcapi_cfa_key_loc key_loc;
1166 TF_CHECK_PARMS2(tfp, parms);
1168 if (parms->data == NULL) {
1170 "%s, invalid parms->data\n",
1171 tf_dir_2_str(parms->dir));
1175 tbl_scope_id = parms->tbl_scope_id;
1177 if (tbl_scope_id == TF_TBL_SCOPE_INVALID) {
1179 "%s, Table scope not allocated\n",
1180 tf_dir_2_str(parms->dir));
1184 /* Get the table scope control block associated with the
1187 tbl_scope_cb = tbl_scope_cb_find(tbl_scope_id);
1189 if (tbl_scope_cb == NULL) {
1191 "%s, table scope error\n",
1192 tf_dir_2_str(parms->dir));
1196 op.opcode = HCAPI_CFA_HWOPS_PUT;
1198 (uint8_t *)&tbl_scope_cb->em_ctx_info[parms->dir].em_tables[TF_RECORD_TABLE];
1199 key_tbl.page_size = TF_EM_PAGE_SIZE;
1200 key_obj.offset = parms->idx;
1201 key_obj.data = parms->data;
1202 key_obj.size = parms->data_sz_in_bytes;
1204 rc = hcapi_cfa_key_hw_op(&op,